Mirror of https://github.com/FEX-Emu/linux.git (synced 2024-12-13 20:33:15 +00:00)
Merge branch '3.2-without-smb2' of git://git.samba.org/sfrench/cifs-2.6
* '3.2-without-smb2' of git://git.samba.org/sfrench/cifs-2.6: (52 commits)
  Fix build break when freezer not configured
  Add definition for share encryption
  CIFS: Make cifs_push_locks send as many locks at once as possible
  CIFS: Send as many mandatory unlock ranges at once as possible
  CIFS: Implement caching mechanism for posix brlocks
  CIFS: Implement caching mechanism for mandatory brlocks
  CIFS: Fix DFS handling in cifs_get_file_info
  CIFS: Fix error handling in cifs_readv_complete
  [CIFS] Fixup trivial checkpatch warning
  [CIFS] Show nostrictsync and noperm mount options in /proc/mounts
  cifs, freezer: add wait_event_freezekillable and have cifs use it
  cifs: allow cifs_max_pending to be readable under /sys/module/cifs/parameters
  cifs: tune bdi.ra_pages in accordance with the rsize
  cifs: allow for larger rsize= options and change defaults
  cifs: convert cifs_readpages to use async reads
  cifs: add cifs_async_readv
  cifs: fix protocol definition for READ_RSP
  cifs: add a callback function to receive the rest of the frame
  cifs: break out 3rd receive phase into separate function
  cifs: find mid earlier in receive codepath
  ...
commit dabcbb1bae
@ -745,4 +745,18 @@ installed and something like the following lines should be added to the
create cifs.spnego * * /usr/local/sbin/cifs.upcall %k
create dns_resolver * * /usr/local/sbin/cifs.upcall %k

CIFS kernel module parameters
=============================
These module parameters can be specified or modified either during the time of
module loading or during the runtime by using the interface
/proc/module/cifs/parameters/<param>

i.e. echo "value" > /sys/module/cifs/parameters/<param>

1. echo_retries - The number of echo attempts before giving up and
                  reconnecting to the server. The default is 5. The value 0
                  means never reconnect.

2. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default.
                    [Y/y/1]. To disable use any of [N/n/0].
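
For context, these knobs are plain kernel module parameters; enable_oplocks is wired up in cifsfs.c further down in this diff with module_param()/MODULE_PARM_DESC(). A minimal, self-contained sketch of that wiring (the module boilerplate here is illustrative only, not part of the patch) looks like:

#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * Minimal illustration of how a boolean tunable such as enable_oplocks
 * becomes visible (and writable at runtime) under
 * /sys/module/<mod>/parameters/. Placeholder module, for reference only.
 */
static bool enable_oplocks = true;

/* mode 0644: readable by everyone, writable by root, e.g.
 *   echo 0 > /sys/module/<mod>/parameters/enable_oplocks */
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default: y/Y/1");

MODULE_LICENSE("GPL");

With mode 0644 the value appears read/write for root under /sys/module/<mod>/parameters/, which is exactly the runtime interface the text above describes.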
@ -511,7 +511,7 @@ static const struct file_operations cifsFYI_proc_fops = {

static int cifs_oplock_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", oplockEnabled);
        seq_printf(m, "%d\n", enable_oplocks);
        return 0;
}

@ -526,13 +526,16 @@ static ssize_t cifs_oplock_proc_write(struct file *file,
        char c;
        int rc;

        printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
                            "will be removed in kernel version 3.4. Please migrate to "
                            "using the 'enable_oplocks' module parameter in cifs.ko.\n");
        rc = get_user(c, buffer);
        if (rc)
                return rc;
        if (c == '0' || c == 'n' || c == 'N')
                oplockEnabled = 0;
                enable_oplocks = false;
        else if (c == '1' || c == 'y' || c == 'Y')
                oplockEnabled = 1;
                enable_oplocks = true;

        return count;
}

@ -43,6 +43,8 @@
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */

struct cifs_sb_info {
        struct rb_root tlink_tree;
@ -55,6 +57,8 @@ struct cifs_sb_info {
        atomic_t active;
        uid_t mnt_uid;
        gid_t mnt_gid;
        uid_t mnt_backupuid;
        gid_t mnt_backupgid;
        mode_t mnt_file_mode;
        mode_t mnt_dir_mode;
        unsigned int mnt_cifs_flags;

@ -91,9 +91,76 @@ cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
        spin_unlock(&sidgidlock);

        root = &siduidtree;
        spin_lock(&uidsidlock);
        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
        spin_unlock(&uidsidlock);

        root = &sidgidtree;
        spin_lock(&gidsidlock);
        shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
        spin_unlock(&gidsidlock);

        return nr_rem;
}

static void
sid_rb_insert(struct rb_root *root, unsigned long cid,
                struct cifs_sid_id **psidid, char *typestr)
{
        char *strptr;
        struct rb_node *node = root->rb_node;
        struct rb_node *parent = NULL;
        struct rb_node **linkto = &(root->rb_node);
        struct cifs_sid_id *lsidid;

        while (node) {
                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
                parent = node;
                if (cid > lsidid->id) {
                        linkto = &(node->rb_left);
                        node = node->rb_left;
                }
                if (cid < lsidid->id) {
                        linkto = &(node->rb_right);
                        node = node->rb_right;
                }
        }

        (*psidid)->id = cid;
        (*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
        (*psidid)->refcount = 0;

        sprintf((*psidid)->sidstr, "%s", typestr);
        strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
        sprintf(strptr, "%ld", cid);

        clear_bit(SID_ID_PENDING, &(*psidid)->state);
        clear_bit(SID_ID_MAPPED, &(*psidid)->state);

        rb_link_node(&(*psidid)->rbnode, parent, linkto);
        rb_insert_color(&(*psidid)->rbnode, root);
}

static struct cifs_sid_id *
sid_rb_search(struct rb_root *root, unsigned long cid)
{
        struct rb_node *node = root->rb_node;
        struct cifs_sid_id *lsidid;

        while (node) {
                lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
                if (cid > lsidid->id)
                        node = node->rb_left;
                else if (cid < lsidid->id)
                        node = node->rb_right;
                else /* node found */
                        return lsidid;
        }

        return NULL;
}

static struct shrinker cifs_shrinker = {
        .shrink = cifs_idmap_shrinker,
        .seeks = DEFAULT_SEEKS,
@ -110,6 +177,7 @@ cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)

        memcpy(payload, data, datalen);
        key->payload.data = payload;
        key->datalen = datalen;
        return 0;
}

@ -223,6 +291,120 @@ sidid_pending_wait(void *unused)
|
||||
return signal_pending(current) ? -ERESTARTSYS : 0;
|
||||
}
|
||||
|
||||
static int
|
||||
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
|
||||
{
|
||||
int rc = 0;
|
||||
struct key *sidkey;
|
||||
const struct cred *saved_cred;
|
||||
struct cifs_sid *lsid;
|
||||
struct cifs_sid_id *psidid, *npsidid;
|
||||
struct rb_root *cidtree;
|
||||
spinlock_t *cidlock;
|
||||
|
||||
if (sidtype == SIDOWNER) {
|
||||
cidlock = &siduidlock;
|
||||
cidtree = &uidtree;
|
||||
} else if (sidtype == SIDGROUP) {
|
||||
cidlock = &sidgidlock;
|
||||
cidtree = &gidtree;
|
||||
} else
|
||||
return -EINVAL;
|
||||
|
||||
spin_lock(cidlock);
|
||||
psidid = sid_rb_search(cidtree, cid);
|
||||
|
||||
if (!psidid) { /* node does not exist, allocate one & attempt adding */
|
||||
spin_unlock(cidlock);
|
||||
npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
|
||||
if (!npsidid)
|
||||
return -ENOMEM;
|
||||
|
||||
npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
|
||||
if (!npsidid->sidstr) {
|
||||
kfree(npsidid);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
spin_lock(cidlock);
|
||||
psidid = sid_rb_search(cidtree, cid);
|
||||
if (psidid) { /* node happened to get inserted meanwhile */
|
||||
++psidid->refcount;
|
||||
spin_unlock(cidlock);
|
||||
kfree(npsidid->sidstr);
|
||||
kfree(npsidid);
|
||||
} else {
|
||||
psidid = npsidid;
|
||||
sid_rb_insert(cidtree, cid, &psidid,
|
||||
sidtype == SIDOWNER ? "oi:" : "gi:");
|
||||
++psidid->refcount;
|
||||
spin_unlock(cidlock);
|
||||
}
|
||||
} else {
|
||||
++psidid->refcount;
|
||||
spin_unlock(cidlock);
|
||||
}
|
||||
|
||||
/*
|
||||
* If we are here, it is safe to access psidid and its fields
|
||||
* since a reference was taken earlier while holding the spinlock.
|
||||
* A reference on the node is put without holding the spinlock
|
||||
* and it is OK to do so in this case, shrinker will not erase
|
||||
* this node until all references are put and we do not access
|
||||
* any fields of the node after a reference is put.
|
||||
*/
|
||||
if (test_bit(SID_ID_MAPPED, &psidid->state)) {
|
||||
memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
|
||||
psidid->time = jiffies; /* update ts for accessing */
|
||||
goto id_sid_out;
|
||||
}
|
||||
|
||||
if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
|
||||
rc = -EINVAL;
|
||||
goto id_sid_out;
|
||||
}
|
||||
|
||||
if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
|
||||
saved_cred = override_creds(root_cred);
|
||||
sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
|
||||
if (IS_ERR(sidkey)) {
|
||||
rc = -EINVAL;
|
||||
cFYI(1, "%s: Can't map and id to a SID", __func__);
|
||||
} else {
|
||||
lsid = (struct cifs_sid *)sidkey->payload.data;
|
||||
memcpy(&psidid->sid, lsid,
|
||||
sidkey->datalen < sizeof(struct cifs_sid) ?
|
||||
sidkey->datalen : sizeof(struct cifs_sid));
|
||||
memcpy(ssid, &psidid->sid,
|
||||
sidkey->datalen < sizeof(struct cifs_sid) ?
|
||||
sidkey->datalen : sizeof(struct cifs_sid));
|
||||
set_bit(SID_ID_MAPPED, &psidid->state);
|
||||
key_put(sidkey);
|
||||
kfree(psidid->sidstr);
|
||||
}
|
||||
psidid->time = jiffies; /* update ts for accessing */
|
||||
revert_creds(saved_cred);
|
||||
clear_bit(SID_ID_PENDING, &psidid->state);
|
||||
wake_up_bit(&psidid->state, SID_ID_PENDING);
|
||||
} else {
|
||||
rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
|
||||
sidid_pending_wait, TASK_INTERRUPTIBLE);
|
||||
if (rc) {
|
||||
cFYI(1, "%s: sidid_pending_wait interrupted %d",
|
||||
__func__, rc);
|
||||
--psidid->refcount;
|
||||
return rc;
|
||||
}
|
||||
if (test_bit(SID_ID_MAPPED, &psidid->state))
|
||||
memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
|
||||
else
|
||||
rc = -EINVAL;
|
||||
}
|
||||
id_sid_out:
|
||||
--psidid->refcount;
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int
|
||||
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
|
||||
struct cifs_fattr *fattr, uint sidtype)
|
||||
@ -383,6 +565,10 @@ init_cifs_idmap(void)
|
||||
spin_lock_init(&sidgidlock);
|
||||
gidtree = RB_ROOT;
|
||||
|
||||
spin_lock_init(&uidsidlock);
|
||||
siduidtree = RB_ROOT;
|
||||
spin_lock_init(&gidsidlock);
|
||||
sidgidtree = RB_ROOT;
|
||||
register_shrinker(&cifs_shrinker);
|
||||
|
||||
cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
|
||||
@ -422,6 +608,18 @@ cifs_destroy_idmaptrees(void)
|
||||
while ((node = rb_first(root)))
|
||||
rb_erase(node, root);
|
||||
spin_unlock(&sidgidlock);
|
||||
|
||||
root = &siduidtree;
|
||||
spin_lock(&uidsidlock);
|
||||
while ((node = rb_first(root)))
|
||||
rb_erase(node, root);
|
||||
spin_unlock(&uidsidlock);
|
||||
|
||||
root = &sidgidtree;
|
||||
spin_lock(&gidsidlock);
|
||||
while ((node = rb_first(root)))
|
||||
rb_erase(node, root);
|
||||
spin_unlock(&gidsidlock);
|
||||
}
|
||||
|
||||
/* if the two SIDs (roughly equivalent to a UUID for a user or group) are
|
||||
@ -706,7 +904,7 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
|
||||
acl_size = sizeof(struct cifs_acl);
|
||||
|
||||
num_aces = le32_to_cpu(pdacl->num_aces);
|
||||
if (num_aces > 0) {
|
||||
if (num_aces > 0) {
|
||||
umode_t user_mask = S_IRWXU;
|
||||
umode_t group_mask = S_IRWXG;
|
||||
umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
|
||||
@ -868,52 +1066,82 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
|
||||
else
|
||||
cFYI(1, "no ACL"); /* BB grant all or default perms? */
|
||||
|
||||
/* cifscred->uid = owner_sid_ptr->rid;
|
||||
cifscred->gid = group_sid_ptr->rid;
|
||||
memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
|
||||
sizeof(struct cifs_sid));
|
||||
memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
|
||||
sizeof(struct cifs_sid)); */
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
||||
/* Convert permission bits from mode to equivalent CIFS ACL */
|
||||
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
|
||||
struct inode *inode, __u64 nmode)
|
||||
__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
|
||||
{
|
||||
int rc = 0;
|
||||
__u32 dacloffset;
|
||||
__u32 ndacloffset;
|
||||
__u32 sidsoffset;
|
||||
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
|
||||
struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
|
||||
struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
|
||||
struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
|
||||
|
||||
if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
|
||||
return -EIO;
|
||||
|
||||
owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
|
||||
if (nmode != NO_CHANGE_64) { /* chmod */
|
||||
owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
|
||||
le32_to_cpu(pntsd->osidoffset));
|
||||
group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
|
||||
group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
|
||||
le32_to_cpu(pntsd->gsidoffset));
|
||||
dacloffset = le32_to_cpu(pntsd->dacloffset);
|
||||
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
|
||||
ndacloffset = sizeof(struct cifs_ntsd);
|
||||
ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
|
||||
ndacl_ptr->revision = dacl_ptr->revision;
|
||||
ndacl_ptr->size = 0;
|
||||
ndacl_ptr->num_aces = 0;
|
||||
|
||||
dacloffset = le32_to_cpu(pntsd->dacloffset);
|
||||
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
|
||||
|
||||
ndacloffset = sizeof(struct cifs_ntsd);
|
||||
ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
|
||||
ndacl_ptr->revision = dacl_ptr->revision;
|
||||
ndacl_ptr->size = 0;
|
||||
ndacl_ptr->num_aces = 0;
|
||||
|
||||
rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);
|
||||
|
||||
sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
|
||||
|
||||
/* copy security descriptor control portion and owner and group sid */
|
||||
copy_sec_desc(pntsd, pnntsd, sidsoffset);
|
||||
rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
|
||||
nmode);
|
||||
sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
|
||||
/* copy sec desc control portion & owner and group sids */
|
||||
copy_sec_desc(pntsd, pnntsd, sidsoffset);
|
||||
*aclflag = CIFS_ACL_DACL;
|
||||
} else {
|
||||
memcpy(pnntsd, pntsd, secdesclen);
|
||||
if (uid != NO_CHANGE_32) { /* chown */
|
||||
owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
|
||||
le32_to_cpu(pnntsd->osidoffset));
|
||||
nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
|
||||
GFP_KERNEL);
|
||||
if (!nowner_sid_ptr)
|
||||
return -ENOMEM;
|
||||
rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
|
||||
if (rc) {
|
||||
cFYI(1, "%s: Mapping error %d for owner id %d",
|
||||
__func__, rc, uid);
|
||||
kfree(nowner_sid_ptr);
|
||||
return rc;
|
||||
}
|
||||
memcpy(owner_sid_ptr, nowner_sid_ptr,
|
||||
sizeof(struct cifs_sid));
|
||||
kfree(nowner_sid_ptr);
|
||||
*aclflag = CIFS_ACL_OWNER;
|
||||
}
|
||||
if (gid != NO_CHANGE_32) { /* chgrp */
|
||||
group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
|
||||
le32_to_cpu(pnntsd->gsidoffset));
|
||||
ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
|
||||
GFP_KERNEL);
|
||||
if (!ngroup_sid_ptr)
|
||||
return -ENOMEM;
|
||||
rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
|
||||
if (rc) {
|
||||
cFYI(1, "%s: Mapping error %d for group id %d",
|
||||
__func__, rc, gid);
|
||||
kfree(ngroup_sid_ptr);
|
||||
return rc;
|
||||
}
|
||||
memcpy(group_sid_ptr, ngroup_sid_ptr,
|
||||
sizeof(struct cifs_sid));
|
||||
kfree(ngroup_sid_ptr);
|
||||
*aclflag = CIFS_ACL_GROUP;
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
@ -945,7 +1173,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
|
||||
{
|
||||
struct cifs_ntsd *pntsd = NULL;
|
||||
int oplock = 0;
|
||||
int xid, rc;
|
||||
int xid, rc, create_options = 0;
|
||||
__u16 fid;
|
||||
struct cifs_tcon *tcon;
|
||||
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
|
||||
@ -956,9 +1184,12 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
|
||||
tcon = tlink_tcon(tlink);
|
||||
xid = GetXid();
|
||||
|
||||
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
|
||||
&fid, &oplock, NULL, cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
if (backup_cred(cifs_sb))
|
||||
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
||||
|
||||
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
|
||||
create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
if (!rc) {
|
||||
rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
|
||||
CIFSSMBClose(xid, tcon, fid);
|
||||
@ -991,13 +1222,15 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
|
||||
return pntsd;
|
||||
}
|
||||
|
||||
static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
|
||||
struct cifs_ntsd *pnntsd, u32 acllen)
|
||||
/* Set an ACL on the server */
|
||||
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
|
||||
struct inode *inode, const char *path, int aclflag)
|
||||
{
|
||||
int oplock = 0;
|
||||
int xid, rc;
|
||||
int xid, rc, access_flags, create_options = 0;
|
||||
__u16 fid;
|
||||
struct cifs_tcon *tcon;
|
||||
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
|
||||
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
|
||||
|
||||
if (IS_ERR(tlink))
|
||||
@ -1006,15 +1239,23 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
|
||||
tcon = tlink_tcon(tlink);
|
||||
xid = GetXid();
|
||||
|
||||
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
|
||||
&fid, &oplock, NULL, cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
if (backup_cred(cifs_sb))
|
||||
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
||||
|
||||
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
|
||||
access_flags = WRITE_OWNER;
|
||||
else
|
||||
access_flags = WRITE_DAC;
|
||||
|
||||
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
|
||||
create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
if (rc) {
|
||||
cERROR(1, "Unable to open file to set ACL");
|
||||
goto out;
|
||||
}
|
||||
|
||||
rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
|
||||
rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
|
||||
cFYI(DBG2, "SetCIFSACL rc = %d", rc);
|
||||
|
||||
CIFSSMBClose(xid, tcon, fid);
|
||||
@ -1024,17 +1265,6 @@ out:
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Set an ACL on the server */
|
||||
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
|
||||
struct inode *inode, const char *path)
|
||||
{
|
||||
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
|
||||
|
||||
cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
|
||||
|
||||
return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
|
||||
}
|
||||
|
||||
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
|
||||
int
|
||||
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
|
||||
@ -1066,9 +1296,12 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
|
||||
}
|
||||
|
||||
/* Convert mode bits to an ACL so we can update the ACL on the server */
|
||||
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
|
||||
int
|
||||
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
|
||||
uid_t uid, gid_t gid)
|
||||
{
|
||||
int rc = 0;
|
||||
int aclflag = CIFS_ACL_DACL; /* default flag to set */
|
||||
__u32 secdesclen = 0;
|
||||
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
|
||||
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
|
||||
@ -1098,13 +1331,15 @@ int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
rc = build_sec_desc(pntsd, pnntsd, inode, nmode);
|
||||
rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
|
||||
&aclflag);
|
||||
|
||||
cFYI(DBG2, "build_sec_desc rc: %d", rc);
|
||||
|
||||
if (!rc) {
|
||||
/* Set the security descriptor */
|
||||
rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
|
||||
rc = set_cifs_acl(pnntsd, secdesclen, inode,
|
||||
path, aclflag);
|
||||
cFYI(DBG2, "set_cifs_acl rc: %d", rc);
|
||||
}
|
||||
|
||||
|
@ -37,83 +37,8 @@
|
||||
* the sequence number before this function is called. Also, this function
|
||||
* should be called with the server->srv_mutex held.
|
||||
*/
|
||||
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
|
||||
struct TCP_Server_Info *server, char *signature)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (cifs_pdu == NULL || signature == NULL || server == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (!server->secmech.sdescmd5) {
|
||||
cERROR(1, "%s: Can't generate signature\n", __func__);
|
||||
return -1;
|
||||
}
|
||||
|
||||
rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
|
||||
if (rc) {
|
||||
cERROR(1, "%s: Could not init md5\n", __func__);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
|
||||
server->session_key.response, server->session_key.len);
|
||||
if (rc) {
|
||||
cERROR(1, "%s: Could not update with response\n", __func__);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
|
||||
cifs_pdu->Protocol, be32_to_cpu(cifs_pdu->smb_buf_length));
|
||||
if (rc) {
|
||||
cERROR(1, "%s: Could not update with payload\n", __func__);
|
||||
return rc;
|
||||
}
|
||||
|
||||
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
|
||||
if (rc)
|
||||
cERROR(1, "%s: Could not generate md5 hash\n", __func__);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* must be called with server->srv_mutex held */
|
||||
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
|
||||
__u32 *pexpected_response_sequence_number)
|
||||
{
|
||||
int rc = 0;
|
||||
char smb_signature[20];
|
||||
|
||||
if ((cifs_pdu == NULL) || (server == NULL))
|
||||
return -EINVAL;
|
||||
|
||||
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
|
||||
server->tcpStatus == CifsNeedNegotiate)
|
||||
return rc;
|
||||
|
||||
if (!server->session_estab) {
|
||||
strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
|
||||
return rc;
|
||||
}
|
||||
|
||||
cifs_pdu->Signature.Sequence.SequenceNumber =
|
||||
cpu_to_le32(server->sequence_number);
|
||||
cifs_pdu->Signature.Sequence.Reserved = 0;
|
||||
|
||||
*pexpected_response_sequence_number = server->sequence_number++;
|
||||
server->sequence_number++;
|
||||
|
||||
rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
|
||||
if (rc)
|
||||
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
|
||||
else
|
||||
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
|
||||
struct TCP_Server_Info *server, char *signature)
|
||||
static int cifs_calc_signature(const struct kvec *iov, int n_vec,
|
||||
struct TCP_Server_Info *server, char *signature)
|
||||
{
|
||||
int i;
|
||||
int rc;
|
||||
@ -179,7 +104,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
|
||||
{
|
||||
int rc = 0;
|
||||
char smb_signature[20];
|
||||
struct smb_hdr *cifs_pdu = iov[0].iov_base;
|
||||
struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
|
||||
|
||||
if ((cifs_pdu == NULL) || (server == NULL))
|
||||
return -EINVAL;
|
||||
@ -189,7 +114,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
|
||||
return rc;
|
||||
|
||||
if (!server->session_estab) {
|
||||
strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
|
||||
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
|
||||
return rc;
|
||||
}
|
||||
|
||||
@ -200,7 +125,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
|
||||
*pexpected_response_sequence_number = server->sequence_number++;
|
||||
server->sequence_number++;
|
||||
|
||||
rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
|
||||
rc = cifs_calc_signature(iov, n_vec, server, smb_signature);
|
||||
if (rc)
|
||||
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
|
||||
else
|
||||
@ -209,13 +134,27 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
|
||||
/* must be called with server->srv_mutex held */
|
||||
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
|
||||
__u32 *pexpected_response_sequence_number)
|
||||
{
|
||||
struct kvec iov;
|
||||
|
||||
iov.iov_base = cifs_pdu;
|
||||
iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4;
|
||||
|
||||
return cifs_sign_smb2(&iov, 1, server,
|
||||
pexpected_response_sequence_number);
|
||||
}
|
||||
|
||||
int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
|
||||
struct TCP_Server_Info *server,
|
||||
__u32 expected_sequence_number)
|
||||
{
|
||||
unsigned int rc;
|
||||
char server_response_sig[8];
|
||||
char what_we_think_sig_should_be[20];
|
||||
struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
|
||||
|
||||
if (cifs_pdu == NULL || server == NULL)
|
||||
return -EINVAL;
|
||||
@ -247,8 +186,8 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
|
||||
cifs_pdu->Signature.Sequence.Reserved = 0;
|
||||
|
||||
mutex_lock(&server->srv_mutex);
|
||||
rc = cifs_calculate_signature(cifs_pdu, server,
|
||||
what_we_think_sig_should_be);
|
||||
rc = cifs_calc_signature(iov, nr_iov, server,
|
||||
what_we_think_sig_should_be);
|
||||
mutex_unlock(&server->srv_mutex);
|
||||
|
||||
if (rc)
|
||||
|
@ -53,7 +53,7 @@
|
||||
int cifsFYI = 0;
|
||||
int cifsERROR = 1;
|
||||
int traceSMB = 0;
|
||||
unsigned int oplockEnabled = 1;
|
||||
bool enable_oplocks = true;
|
||||
unsigned int linuxExtEnabled = 1;
|
||||
unsigned int lookupCacheEnabled = 1;
|
||||
unsigned int multiuser_mount = 0;
|
||||
@ -74,7 +74,7 @@ module_param(cifs_min_small, int, 0);
|
||||
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
|
||||
"Range: 2 to 256");
|
||||
unsigned int cifs_max_pending = CIFS_MAX_REQ;
|
||||
module_param(cifs_max_pending, int, 0);
|
||||
module_param(cifs_max_pending, int, 0444);
|
||||
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
|
||||
"Default: 50 Range: 2 to 256");
|
||||
unsigned short echo_retries = 5;
|
||||
@ -82,6 +82,10 @@ module_param(echo_retries, ushort, 0644);
|
||||
MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
|
||||
"reconnecting server. Default: 5. 0 means "
|
||||
"never reconnect.");
|
||||
module_param(enable_oplocks, bool, 0644);
|
||||
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:"
|
||||
"y/Y/1");
|
||||
|
||||
extern mempool_t *cifs_sm_req_poolp;
|
||||
extern mempool_t *cifs_req_poolp;
|
||||
extern mempool_t *cifs_mid_poolp;
|
||||
@ -132,12 +136,12 @@ cifs_read_super(struct super_block *sb)
|
||||
else
|
||||
sb->s_d_op = &cifs_dentry_ops;
|
||||
|
||||
#ifdef CIFS_NFSD_EXPORT
|
||||
#ifdef CONFIG_CIFS_NFSD_EXPORT
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
|
||||
cFYI(1, "export ops supported");
|
||||
sb->s_export_op = &cifs_export_ops;
|
||||
}
|
||||
#endif /* CIFS_NFSD_EXPORT */
|
||||
#endif /* CONFIG_CIFS_NFSD_EXPORT */
|
||||
|
||||
return 0;
|
||||
|
||||
@ -432,6 +436,12 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
|
||||
seq_printf(s, ",mfsymlinks");
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
|
||||
seq_printf(s, ",fsc");
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
|
||||
seq_printf(s, ",nostrictsync");
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
|
||||
seq_printf(s, ",noperm");
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
|
||||
seq_printf(s, ",strictcache");
|
||||
|
||||
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
|
||||
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
|
||||
@ -530,7 +540,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
|
||||
char *full_path = NULL;
|
||||
char *s, *p;
|
||||
char sep;
|
||||
int xid;
|
||||
|
||||
full_path = cifs_build_path_to_root(vol, cifs_sb,
|
||||
cifs_sb_master_tcon(cifs_sb));
|
||||
@ -539,7 +548,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
|
||||
|
||||
cFYI(1, "Get root dentry for %s", full_path);
|
||||
|
||||
xid = GetXid();
|
||||
sep = CIFS_DIR_SEP(cifs_sb);
|
||||
dentry = dget(sb->s_root);
|
||||
p = s = full_path;
|
||||
@ -570,7 +578,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
|
||||
dput(dentry);
|
||||
dentry = child;
|
||||
} while (!IS_ERR(dentry));
|
||||
_FreeXid(xid);
|
||||
kfree(full_path);
|
||||
return dentry;
|
||||
}
|
||||
@ -942,7 +949,8 @@ cifs_init_once(void *inode)
|
||||
struct cifsInodeInfo *cifsi = inode;
|
||||
|
||||
inode_init_once(&cifsi->vfs_inode);
|
||||
INIT_LIST_HEAD(&cifsi->lockList);
|
||||
INIT_LIST_HEAD(&cifsi->llist);
|
||||
mutex_init(&cifsi->lock_mutex);
|
||||
}
|
||||
|
||||
static int
|
||||
|
@ -121,9 +121,9 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
|
||||
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
|
||||
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
|
||||
|
||||
#ifdef CIFS_NFSD_EXPORT
|
||||
#ifdef CONFIG_CIFS_NFSD_EXPORT
|
||||
extern const struct export_operations cifs_export_ops;
|
||||
#endif /* CIFS_NFSD_EXPORT */
|
||||
#endif /* CONFIG_CIFS_NFSD_EXPORT */
|
||||
|
||||
#define CIFS_VERSION "1.75"
|
||||
#endif /* _CIFSFS_H */
|
||||
|
@ -167,6 +167,8 @@ struct smb_vol {
|
||||
uid_t cred_uid;
|
||||
uid_t linux_uid;
|
||||
gid_t linux_gid;
|
||||
uid_t backupuid;
|
||||
gid_t backupgid;
|
||||
mode_t file_mode;
|
||||
mode_t dir_mode;
|
||||
unsigned secFlg;
|
||||
@ -179,6 +181,8 @@ struct smb_vol {
|
||||
bool noperm:1;
|
||||
bool no_psx_acl:1; /* set if posix acl support should be disabled */
|
||||
bool cifs_acl:1;
|
||||
bool backupuid_specified; /* mount option backupuid is specified */
|
||||
bool backupgid_specified; /* mount option backupgid is specified */
|
||||
bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
|
||||
bool server_ino:1; /* use inode numbers from server ie UniqueId */
|
||||
bool direct_io:1;
|
||||
@ -219,7 +223,8 @@ struct smb_vol {
|
||||
CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
|
||||
CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
|
||||
CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
|
||||
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO)
|
||||
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
|
||||
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
|
||||
|
||||
#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
|
||||
MS_NODEV | MS_SYNCHRONOUS)
|
||||
@ -286,7 +291,13 @@ struct TCP_Server_Info {
|
||||
bool sec_kerberosu2u; /* supports U2U Kerberos */
|
||||
bool sec_kerberos; /* supports plain Kerberos */
|
||||
bool sec_mskerberos; /* supports legacy MS Kerberos */
|
||||
bool large_buf; /* is current buffer large? */
|
||||
struct delayed_work echo; /* echo ping workqueue job */
|
||||
struct kvec *iov; /* reusable kvec array for receives */
|
||||
unsigned int nr_iov; /* number of kvecs in array */
|
||||
char *smallbuf; /* pointer to current "small" buffer */
|
||||
char *bigbuf; /* pointer to current "big" buffer */
|
||||
unsigned int total_read; /* total amount of data read in this pass */
|
||||
#ifdef CONFIG_CIFS_FSCACHE
|
||||
struct fscache_cookie *fscache; /* client index cache cookie */
|
||||
#endif
|
||||
@ -485,9 +496,13 @@ extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
|
||||
*/
|
||||
struct cifsLockInfo {
|
||||
struct list_head llist; /* pointer to next cifsLockInfo */
|
||||
struct list_head blist; /* pointer to locks blocked on this */
|
||||
wait_queue_head_t block_q;
|
||||
__u64 offset;
|
||||
__u64 length;
|
||||
__u32 pid;
|
||||
__u8 type;
|
||||
__u16 netfid;
|
||||
};
|
||||
|
||||
/*
|
||||
@ -520,8 +535,6 @@ struct cifsFileInfo {
|
||||
struct dentry *dentry;
|
||||
unsigned int f_flags;
|
||||
struct tcon_link *tlink;
|
||||
struct mutex lock_mutex;
|
||||
struct list_head llist; /* list of byte range locks we have. */
|
||||
bool invalidHandle:1; /* file closed via session abend */
|
||||
bool oplock_break_cancelled:1;
|
||||
int count; /* refcount protected by cifs_file_list_lock */
|
||||
@ -554,7 +567,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
|
||||
*/
|
||||
|
||||
struct cifsInodeInfo {
|
||||
struct list_head lockList;
|
||||
struct list_head llist; /* brlocks for this inode */
|
||||
bool can_cache_brlcks;
|
||||
struct mutex lock_mutex; /* protect two fields above */
|
||||
/* BB add in lists for dirty pages i.e. write caching info for oplock */
|
||||
struct list_head openFileList;
|
||||
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
|
||||
@ -643,8 +658,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
|
||||
struct mid_q_entry;
|
||||
|
||||
/*
|
||||
* This is the prototype for the mid callback function. When creating one,
|
||||
* take special care to avoid deadlocks. Things to bear in mind:
|
||||
* This is the prototype for the mid receive function. This function is for
|
||||
* receiving the rest of the SMB frame, starting with the WordCount (which is
|
||||
* just after the MID in struct smb_hdr). Note:
|
||||
*
|
||||
* - This will be called by cifsd, with no locks held.
|
||||
* - The mid will still be on the pending_mid_q.
|
||||
* - mid->resp_buf will point to the current buffer.
|
||||
*
|
||||
* Returns zero on a successful receive, or an error. The receive state in
|
||||
* the TCP_Server_Info will also be updated.
|
||||
*/
|
||||
typedef int (mid_receive_t)(struct TCP_Server_Info *server,
|
||||
struct mid_q_entry *mid);
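
To make the receive contract above concrete, here is an illustrative mid_receive_t that simply drains and discards the rest of the frame. It is not part of the patch; it is a sketch modelled on cifs_readv_discard() added later in this series, using only helpers the series exports (cifs_read_from_socket, dequeue_mid):

/*
 * Illustrative mid_receive_t (not in the patch): pull the remainder of
 * the frame off the socket into the scratch bigbuf and drop it, then
 * mark the mid as received. Mirrors the shape of cifs_readv_discard().
 */
static int example_discard_receive(struct TCP_Server_Info *server,
                                   struct mid_q_entry *mid)
{
        unsigned int rfclen = be32_to_cpu(mid->resp_buf->smb_buf_length);
        int remaining = rfclen + 4 - server->total_read;

        while (remaining > 0) {
                int length = cifs_read_from_socket(server, server->bigbuf,
                                min_t(unsigned int, remaining,
                                      CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
                if (length < 0)
                        return length;  /* socket error: cifsd reconnects */
                server->total_read += length;
                remaining -= length;
        }

        dequeue_mid(mid, false);        /* frame consumed, not malformed */
        return 0;
}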
|
||||
|
||||
/*
|
||||
* This is the prototype for the mid callback function. This is called once the
|
||||
* mid has been received off of the socket. When creating one, take special
|
||||
* care to avoid deadlocks. Things to bear in mind:
|
||||
*
|
||||
* - it will be called by cifsd, with no locks held
|
||||
* - the mid will be removed from any lists
|
||||
@ -662,9 +693,10 @@ struct mid_q_entry {
|
||||
unsigned long when_sent; /* time when smb send finished */
|
||||
unsigned long when_received; /* when demux complete (taken off wire) */
|
||||
#endif
|
||||
mid_receive_t *receive; /* call receive callback */
|
||||
mid_callback_t *callback; /* call completion callback */
|
||||
void *callback_data; /* general purpose pointer for callback */
|
||||
struct smb_hdr *resp_buf; /* response buffer */
|
||||
struct smb_hdr *resp_buf; /* pointer to received SMB header */
|
||||
int midState; /* wish this were enum but can not pass to wait_event */
|
||||
__u8 command; /* smb command code */
|
||||
bool largeBuf:1; /* if valid response, is pointer to large buf */
|
||||
@ -964,7 +996,8 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
|
||||
to be established on existing mount if we
|
||||
have the uid/password or Kerberos credential
|
||||
or equivalent for current user */
|
||||
GLOBAL_EXTERN unsigned int oplockEnabled;
|
||||
/* enable or disable oplocks */
|
||||
GLOBAL_EXTERN bool enable_oplocks;
|
||||
GLOBAL_EXTERN unsigned int lookupCacheEnabled;
|
||||
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
|
||||
with more secure ntlmssp2 challenge/resp */
|
||||
@ -978,10 +1011,16 @@ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
|
||||
/* reconnect after this many failed echo attempts */
|
||||
GLOBAL_EXTERN unsigned short echo_retries;
|
||||
|
||||
#ifdef CONFIG_CIFS_ACL
|
||||
GLOBAL_EXTERN struct rb_root uidtree;
|
||||
GLOBAL_EXTERN struct rb_root gidtree;
|
||||
GLOBAL_EXTERN spinlock_t siduidlock;
|
||||
GLOBAL_EXTERN spinlock_t sidgidlock;
|
||||
GLOBAL_EXTERN struct rb_root siduidtree;
|
||||
GLOBAL_EXTERN struct rb_root sidgidtree;
|
||||
GLOBAL_EXTERN spinlock_t uidsidlock;
|
||||
GLOBAL_EXTERN spinlock_t gidsidlock;
|
||||
#endif /* CONFIG_CIFS_ACL */
|
||||
|
||||
void cifs_oplock_break(struct work_struct *work);
|
||||
|
||||
|
@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp {
|
||||
__le16 DataLengthHigh;
|
||||
__u64 Reserved2;
|
||||
__u16 ByteCount;
|
||||
__u8 Pad; /* BB check for whether padded to DWORD
|
||||
boundary and optimum performance here */
|
||||
char Data[1];
|
||||
/* read response data immediately follows */
|
||||
} __attribute__((packed)) READ_RSP;
|
||||
|
||||
typedef struct locking_andx_range {
|
||||
@ -1913,6 +1911,10 @@ typedef struct whoami_rsp_data { /* Query level 0x202 */
|
||||
|
||||
/* SETFSInfo Levels */
|
||||
#define SMB_SET_CIFS_UNIX_INFO 0x200
|
||||
/* level 0x203 is defined above in list of QFS info levels */
|
||||
/* #define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203 */
|
||||
|
||||
/* Level 0x200 request structure follows */
|
||||
typedef struct smb_com_transaction2_setfsi_req {
|
||||
struct smb_hdr hdr; /* wct = 15 */
|
||||
__le16 TotalParameterCount;
|
||||
@ -1940,13 +1942,39 @@ typedef struct smb_com_transaction2_setfsi_req {
|
||||
__le64 ClientUnixCap; /* Data end */
|
||||
} __attribute__((packed)) TRANSACTION2_SETFSI_REQ;
|
||||
|
||||
/* level 0x203 request structure follows */
|
||||
typedef struct smb_com_transaction2_setfs_enc_req {
|
||||
struct smb_hdr hdr; /* wct = 15 */
|
||||
__le16 TotalParameterCount;
|
||||
__le16 TotalDataCount;
|
||||
__le16 MaxParameterCount;
|
||||
__le16 MaxDataCount;
|
||||
__u8 MaxSetupCount;
|
||||
__u8 Reserved;
|
||||
__le16 Flags;
|
||||
__le32 Timeout;
|
||||
__u16 Reserved2;
|
||||
__le16 ParameterCount; /* 4 */
|
||||
__le16 ParameterOffset;
|
||||
__le16 DataCount; /* 12 */
|
||||
__le16 DataOffset;
|
||||
__u8 SetupCount; /* one */
|
||||
__u8 Reserved3;
|
||||
__le16 SubCommand; /* TRANS2_SET_FS_INFORMATION */
|
||||
__le16 ByteCount;
|
||||
__u8 Pad;
|
||||
__u16 Reserved4; /* Parameters start. */
|
||||
__le16 InformationLevel;/* Parameters end. */
|
||||
/* NTLMSSP Blob, Data start. */
|
||||
} __attribute__((packed)) TRANSACTION2_SETFSI_ENC_REQ;
|
||||
|
||||
/* response for setfsinfo levels 0x200 and 0x203 */
|
||||
typedef struct smb_com_transaction2_setfsi_rsp {
|
||||
struct smb_hdr hdr; /* wct = 10 */
|
||||
struct trans2_resp t2;
|
||||
__u16 ByteCount;
|
||||
} __attribute__((packed)) TRANSACTION2_SETFSI_RSP;
|
||||
|
||||
|
||||
typedef struct smb_com_transaction2_get_dfs_refer_req {
|
||||
struct smb_hdr hdr; /* wct = 15 */
|
||||
__le16 TotalParameterCount;
|
||||
@ -2098,13 +2126,13 @@ typedef struct {
|
||||
#define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and
|
||||
QFS PROXY call */
|
||||
#ifdef CONFIG_CIFS_POSIX
|
||||
/* Can not set pathnames cap yet until we send new posix create SMB since
|
||||
otherwise server can treat such handles opened with older ntcreatex
|
||||
(by a new client which knows how to send posix path ops)
|
||||
as non-posix handles (can affect write behavior with byte range locks.
|
||||
We can add back in POSIX_PATH_OPS cap when Posix Create/Mkdir finished */
|
||||
/* presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
|
||||
LockingX instead of posix locking call on unix sess (and we do not expect
|
||||
LockingX to use different (ie Windows) semantics than posix locking on
|
||||
the same session (if WINE needs to do this later, we can add this cap
|
||||
back in later */
|
||||
/* #define CIFS_UNIX_CAP_MASK 0x000000fb */
|
||||
#define CIFS_UNIX_CAP_MASK 0x000000db
|
||||
#define CIFS_UNIX_CAP_MASK 0x000003db
|
||||
#else
|
||||
#define CIFS_UNIX_CAP_MASK 0x00000013
|
||||
#endif /* CONFIG_CIFS_POSIX */
|
||||
|
@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
|
||||
struct TCP_Server_Info *server);
|
||||
extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
|
||||
extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
|
||||
unsigned int nvec, mid_callback_t *callback,
|
||||
void *cbdata, bool ignore_pend);
|
||||
unsigned int nvec, mid_receive_t *receive,
|
||||
mid_callback_t *callback, void *cbdata,
|
||||
bool ignore_pend);
|
||||
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
|
||||
struct smb_hdr * /* input */ ,
|
||||
struct smb_hdr * /* out */ ,
|
||||
@ -90,6 +91,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
|
||||
extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
|
||||
extern bool is_valid_oplock_break(struct smb_hdr *smb,
|
||||
struct TCP_Server_Info *);
|
||||
extern bool backup_cred(struct cifs_sb_info *);
|
||||
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
|
||||
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
|
||||
unsigned int bytes_written);
|
||||
@ -145,12 +147,19 @@ extern int cifs_get_inode_info_unix(struct inode **pinode,
|
||||
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
|
||||
struct cifs_fattr *fattr, struct inode *inode,
|
||||
const char *path, const __u16 *pfid);
|
||||
extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64);
|
||||
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
|
||||
uid_t, gid_t);
|
||||
extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
|
||||
const char *, u32 *);
|
||||
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
|
||||
const char *);
|
||||
const char *, int);
|
||||
|
||||
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
|
||||
extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
|
||||
unsigned int to_read);
|
||||
extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
|
||||
struct kvec *iov_orig, unsigned int nr_segs,
|
||||
unsigned int to_read);
|
||||
extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
|
||||
struct cifs_sb_info *cifs_sb);
|
||||
extern int cifs_match_super(struct super_block *, void *);
|
||||
@ -359,14 +368,17 @@ extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
|
||||
const struct nls_table *nls_codepage,
|
||||
int remap_special_chars);
|
||||
|
||||
extern int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
|
||||
const __u8 lock_type, const __u32 num_unlock,
|
||||
const __u32 num_lock, LOCKING_ANDX_RANGE *buf);
|
||||
extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
|
||||
const __u16 netfid, const __u64 len,
|
||||
const __u16 netfid, const __u32 netpid, const __u64 len,
|
||||
const __u64 offset, const __u32 numUnlock,
|
||||
const __u32 numLock, const __u8 lockType,
|
||||
const bool waitFlag, const __u8 oplock_level);
|
||||
extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
|
||||
const __u16 smb_file_id, const int get_flag,
|
||||
const __u64 len, struct file_lock *,
|
||||
const __u16 smb_file_id, const __u32 netpid,
|
||||
const int get_flag, const __u64 len, struct file_lock *,
|
||||
const __u16 lock_type, const bool waitFlag);
|
||||
extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
|
||||
extern int CIFSSMBEcho(struct TCP_Server_Info *server);
|
||||
@ -380,7 +392,7 @@ extern void tconInfoFree(struct cifs_tcon *);
|
||||
extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
|
||||
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
|
||||
__u32 *);
|
||||
extern int cifs_verify_signature(struct smb_hdr *,
|
||||
extern int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
|
||||
struct TCP_Server_Info *server,
|
||||
__u32 expected_sequence_number);
|
||||
extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
|
||||
@ -419,7 +431,7 @@ extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
|
||||
extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
|
||||
__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
|
||||
extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
|
||||
struct cifs_ntsd *, __u32);
|
||||
struct cifs_ntsd *, __u32, int);
|
||||
extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
|
||||
const unsigned char *searchName,
|
||||
char *acl_inf, const int buflen, const int acl_type,
|
||||
@ -440,6 +452,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
|
||||
extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
|
||||
unsigned char *p24);
|
||||
|
||||
/* asynchronous read support */
|
||||
struct cifs_readdata {
|
||||
struct cifsFileInfo *cfile;
|
||||
struct address_space *mapping;
|
||||
__u64 offset;
|
||||
unsigned int bytes;
|
||||
pid_t pid;
|
||||
int result;
|
||||
struct list_head pages;
|
||||
struct work_struct work;
|
||||
unsigned int nr_iov;
|
||||
struct kvec iov[1];
|
||||
};
|
||||
|
||||
struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
|
||||
void cifs_readdata_free(struct cifs_readdata *rdata);
|
||||
int cifs_async_readv(struct cifs_readdata *rdata);
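
The intended call pattern for these helpers (the real consumer is the cifs_readpages conversion elsewhere in this diff) is roughly the hypothetical sketch below. The function name is made up, and cfile reference counting plus page locking are elided; the real caller takes the reference that cifs_readdata_free() later drops:

/*
 * Hypothetical caller of the new async read API, for illustration only.
 * The pages on locked_pages are locked pages already added to the page
 * cache; cifs_readv_receive()/cifs_readv_complete() fill and release
 * them when the READ response arrives.
 */
static int example_async_read(struct cifsFileInfo *cfile,
                              struct address_space *mapping,
                              struct list_head *locked_pages,
                              unsigned int nr_pages, __u64 offset,
                              unsigned int bytes, pid_t pid)
{
        struct cifs_readdata *rdata;
        int rc;

        rdata = cifs_readdata_alloc(nr_pages); /* readdata + 1 kvec per page */
        if (rdata == NULL)
                return -ENOMEM;

        rdata->cfile = cfile;           /* real code takes a reference here */
        rdata->mapping = mapping;
        rdata->offset = offset;
        rdata->bytes = bytes;
        rdata->pid = pid;
        list_splice_init(locked_pages, &rdata->pages);

        rc = cifs_async_readv(rdata);   /* queues SMB_COM_READ_ANDX */
        if (rc)
                cifs_readdata_free(rdata);
        return rc;
}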
|
||||
|
||||
/* asynchronous write support */
|
||||
struct cifs_writedata {
|
||||
struct kref refcount;
|
||||
|
@ -33,6 +33,8 @@
|
||||
#include <linux/slab.h>
|
||||
#include <linux/posix_acl_xattr.h>
|
||||
#include <linux/pagemap.h>
|
||||
#include <linux/swap.h>
|
||||
#include <linux/task_io_accounting_ops.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include "cifspdu.h"
|
||||
#include "cifsglob.h"
|
||||
@ -40,6 +42,7 @@
|
||||
#include "cifsproto.h"
|
||||
#include "cifs_unicode.h"
|
||||
#include "cifs_debug.h"
|
||||
#include "fscache.h"
|
||||
|
||||
#ifdef CONFIG_CIFS_POSIX
|
||||
static struct {
|
||||
@ -83,6 +86,9 @@ static struct {
|
||||
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
|
||||
#endif /* CIFS_POSIX */
|
||||
|
||||
/* Forward declarations */
|
||||
static void cifs_readv_complete(struct work_struct *work);
|
||||
|
||||
/* Mark as invalid, all open files on tree connections since they
|
||||
were closed when session to server was lost */
|
||||
static void mark_open_files_invalid(struct cifs_tcon *pTcon)
|
||||
@ -453,8 +459,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
|
||||
}
|
||||
server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
|
||||
server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
|
||||
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize),
|
||||
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
|
||||
server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
|
||||
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
|
||||
/* even though we do not use raw we might as well set this
|
||||
accurately, in case we ever find a need for it */
|
||||
@ -561,8 +566,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
|
||||
little endian */
|
||||
server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
|
||||
/* probably no need to store and check maxvcs */
|
||||
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize),
|
||||
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
|
||||
server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
|
||||
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
|
||||
cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
|
||||
server->capabilities = le32_to_cpu(pSMBr->Capabilities);
|
||||
@ -739,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
|
||||
iov.iov_base = smb;
|
||||
iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
|
||||
|
||||
rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
|
||||
rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
|
||||
server, true);
|
||||
if (rc)
|
||||
cFYI(1, "Echo request failed: %d", rc);
|
||||
|
||||
@ -1376,6 +1381,359 @@ openRetry:
|
||||
return rc;
|
||||
}
|
||||
|
||||
struct cifs_readdata *
|
||||
cifs_readdata_alloc(unsigned int nr_pages)
|
||||
{
|
||||
struct cifs_readdata *rdata;
|
||||
|
||||
/* readdata + 1 kvec for each page */
|
||||
rdata = kzalloc(sizeof(*rdata) +
|
||||
sizeof(struct kvec) * nr_pages, GFP_KERNEL);
|
||||
if (rdata != NULL) {
|
||||
INIT_WORK(&rdata->work, cifs_readv_complete);
|
||||
INIT_LIST_HEAD(&rdata->pages);
|
||||
}
|
||||
return rdata;
|
||||
}
|
||||
|
||||
void
|
||||
cifs_readdata_free(struct cifs_readdata *rdata)
|
||||
{
|
||||
cifsFileInfo_put(rdata->cfile);
|
||||
kfree(rdata);
|
||||
}
|
||||
|
||||
/*
|
||||
* Discard any remaining data in the current SMB. To do this, we borrow the
|
||||
* current bigbuf.
|
||||
*/
|
||||
static int
|
||||
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
{
|
||||
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
|
||||
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
|
||||
int remaining = rfclen + 4 - server->total_read;
|
||||
struct cifs_readdata *rdata = mid->callback_data;
|
||||
|
||||
while (remaining > 0) {
|
||||
int length;
|
||||
|
||||
length = cifs_read_from_socket(server, server->bigbuf,
|
||||
min_t(unsigned int, remaining,
|
||||
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
|
||||
if (length < 0)
|
||||
return length;
|
||||
server->total_read += length;
|
||||
remaining -= length;
|
||||
}
|
||||
|
||||
dequeue_mid(mid, rdata->result);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
{
|
||||
int length, len;
|
||||
unsigned int data_offset, remaining, data_len;
|
||||
struct cifs_readdata *rdata = mid->callback_data;
|
||||
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
|
||||
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
|
||||
u64 eof;
|
||||
pgoff_t eof_index;
|
||||
struct page *page, *tpage;
|
||||
|
||||
cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
|
||||
mid->mid, rdata->offset, rdata->bytes);
|
||||
|
||||
/*
|
||||
* read the rest of READ_RSP header (sans Data array), or whatever we
|
||||
* can if there's not enough data. At this point, we've read down to
|
||||
* the Mid.
|
||||
*/
|
||||
len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
|
||||
sizeof(struct smb_hdr) + 1;
|
||||
|
||||
rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
|
||||
rdata->iov[0].iov_len = len;
|
||||
|
||||
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
|
||||
if (length < 0)
|
||||
return length;
|
||||
server->total_read += length;
|
||||
|
||||
/* Was the SMB read successful? */
|
||||
rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
|
||||
if (rdata->result != 0) {
|
||||
cFYI(1, "%s: server returned error %d", __func__,
|
||||
rdata->result);
|
||||
return cifs_readv_discard(server, mid);
|
||||
}
|
||||
|
||||
/* Is there enough to get to the rest of the READ_RSP header? */
|
||||
if (server->total_read < sizeof(READ_RSP)) {
|
||||
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
|
||||
__func__, server->total_read, sizeof(READ_RSP));
|
||||
rdata->result = -EIO;
|
||||
return cifs_readv_discard(server, mid);
|
||||
}
|
||||
|
||||
data_offset = le16_to_cpu(rsp->DataOffset) + 4;
|
||||
if (data_offset < server->total_read) {
|
||||
/*
|
||||
* win2k8 sometimes sends an offset of 0 when the read
|
||||
* is beyond the EOF. Treat it as if the data starts just after
|
||||
* the header.
|
||||
*/
|
||||
cFYI(1, "%s: data offset (%u) inside read response header",
|
||||
__func__, data_offset);
|
||||
data_offset = server->total_read;
|
||||
} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
|
||||
/* data_offset is beyond the end of smallbuf */
|
||||
cFYI(1, "%s: data offset (%u) beyond end of smallbuf",
|
||||
__func__, data_offset);
|
||||
rdata->result = -EIO;
|
||||
return cifs_readv_discard(server, mid);
|
||||
}
|
||||
|
||||
cFYI(1, "%s: total_read=%u data_offset=%u", __func__,
|
||||
server->total_read, data_offset);
|
||||
|
||||
len = data_offset - server->total_read;
|
||||
if (len > 0) {
|
||||
/* read any junk before data into the rest of smallbuf */
|
||||
rdata->iov[0].iov_base = server->smallbuf + server->total_read;
|
||||
rdata->iov[0].iov_len = len;
|
||||
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
|
||||
if (length < 0)
|
||||
return length;
|
||||
server->total_read += length;
|
||||
}
|
||||
|
||||
/* set up first iov for signature check */
|
||||
rdata->iov[0].iov_base = server->smallbuf;
|
||||
rdata->iov[0].iov_len = server->total_read;
|
||||
cFYI(1, "0: iov_base=%p iov_len=%zu",
|
||||
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
|
||||
|
||||
/* how much data is in the response? */
|
||||
data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
|
||||
data_len += le16_to_cpu(rsp->DataLength);
|
||||
if (data_offset + data_len > rfclen) {
|
||||
/* data_len is corrupt -- discard frame */
|
||||
rdata->result = -EIO;
|
||||
return cifs_readv_discard(server, mid);
|
||||
}
|
||||
|
||||
/* marshal up the page array */
|
||||
len = 0;
|
||||
remaining = data_len;
|
||||
rdata->nr_iov = 1;
|
||||
|
||||
/* determine the eof that the server (probably) has */
|
||||
eof = CIFS_I(rdata->mapping->host)->server_eof;
|
||||
eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
|
||||
cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
|
||||
|
||||
list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
|
||||
if (remaining >= PAGE_CACHE_SIZE) {
|
||||
/* enough data to fill the page */
|
||||
rdata->iov[rdata->nr_iov].iov_base = kmap(page);
|
||||
rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
|
||||
cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
|
||||
rdata->nr_iov, page->index,
|
||||
rdata->iov[rdata->nr_iov].iov_base,
|
||||
rdata->iov[rdata->nr_iov].iov_len);
|
||||
++rdata->nr_iov;
|
||||
len += PAGE_CACHE_SIZE;
|
||||
remaining -= PAGE_CACHE_SIZE;
|
||||
} else if (remaining > 0) {
|
||||
/* enough for partial page, fill and zero the rest */
|
||||
rdata->iov[rdata->nr_iov].iov_base = kmap(page);
|
||||
rdata->iov[rdata->nr_iov].iov_len = remaining;
|
||||
cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
|
||||
rdata->nr_iov, page->index,
|
||||
rdata->iov[rdata->nr_iov].iov_base,
|
||||
rdata->iov[rdata->nr_iov].iov_len);
|
||||
memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
|
||||
'\0', PAGE_CACHE_SIZE - remaining);
|
||||
++rdata->nr_iov;
|
||||
len += remaining;
|
||||
remaining = 0;
|
||||
} else if (page->index > eof_index) {
|
||||
/*
|
||||
* The VFS will not try to do readahead past the
|
||||
* i_size, but it's possible that we have outstanding
|
||||
* writes with gaps in the middle and the i_size hasn't
|
||||
* caught up yet. Populate those with zeroed out pages
|
||||
* to prevent the VFS from repeatedly attempting to
|
||||
* fill them until the writes are flushed.
|
||||
*/
|
||||
zero_user(page, 0, PAGE_CACHE_SIZE);
|
||||
list_del(&page->lru);
|
||||
lru_cache_add_file(page);
|
||||
flush_dcache_page(page);
|
||||
SetPageUptodate(page);
|
||||
unlock_page(page);
|
||||
page_cache_release(page);
|
||||
} else {
|
||||
/* no need to hold page hostage */
|
||||
list_del(&page->lru);
|
||||
lru_cache_add_file(page);
|
||||
unlock_page(page);
|
||||
page_cache_release(page);
|
||||
}
|
||||
}
|
||||
|
||||
/* issue the read if we have any iovecs left to fill */
|
||||
if (rdata->nr_iov > 1) {
|
||||
length = cifs_readv_from_socket(server, &rdata->iov[1],
|
||||
rdata->nr_iov - 1, len);
|
||||
if (length < 0)
|
||||
return length;
|
||||
server->total_read += length;
|
||||
} else {
|
||||
length = 0;
|
||||
}
|
||||
|
||||
rdata->bytes = length;
|
||||
|
||||
cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
|
||||
rfclen, remaining);
|
||||
|
||||
/* discard anything left over */
|
||||
if (server->total_read < rfclen)
|
||||
return cifs_readv_discard(server, mid);
|
||||
|
||||
dequeue_mid(mid, false);
|
||||
return length;
|
||||
}
|
||||
|
||||
static void
cifs_readv_complete(struct work_struct *work)
{
	struct cifs_readdata *rdata = container_of(work,
						struct cifs_readdata, work);
	struct page *page, *tpage;

	list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
		list_del(&page->lru);
		lru_cache_add_file(page);

		if (rdata->result == 0) {
			kunmap(page);
			flush_dcache_page(page);
			SetPageUptodate(page);
		}

		unlock_page(page);

		if (rdata->result == 0)
			cifs_readpage_to_fscache(rdata->mapping->host, page);

		page_cache_release(page);
	}
	cifs_readdata_free(rdata);
}

static void
cifs_readv_callback(struct mid_q_entry *mid)
{
	struct cifs_readdata *rdata = mid->callback_data;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
		mid->mid, mid->midState, rdata->result, rdata->bytes);

	switch (mid->midState) {
	case MID_RESPONSE_RECEIVED:
		/* result already set, check signature */
		if (server->sec_mode &
		    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
			if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
						  server, mid->sequence_number + 1))
				cERROR(1, "Unexpected SMB signature");
		}
		/* FIXME: should this be counted toward the initiating task? */
		task_io_account_read(rdata->bytes);
		cifs_stats_bytes_read(tcon, rdata->bytes);
		break;
	case MID_REQUEST_SUBMITTED:
	case MID_RETRY_NEEDED:
		rdata->result = -EAGAIN;
		break;
	default:
		rdata->result = -EIO;
	}

	queue_work(system_nrt_wq, &rdata->work);
	DeleteMidQEntry(mid);
	atomic_dec(&server->inFlight);
	wake_up(&server->request_q);
}

/* cifs_async_readv - send an async read, and set up mid to handle result */
int
cifs_async_readv(struct cifs_readdata *rdata)
{
	int rc;
	READ_REQ *smb = NULL;
	int wct;
	struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);

	cFYI(1, "%s: offset=%llu bytes=%u", __func__,
		rdata->offset, rdata->bytes);

	if (tcon->ses->capabilities & CAP_LARGE_FILES)
		wct = 12;
	else {
		wct = 10; /* old style read */
		if ((rdata->offset >> 32) > 0) {
			/* can not handle this big offset for old */
			return -EIO;
		}
	}

	rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
	if (rc)
		return rc;

	smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
	smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));

	smb->AndXCommand = 0xFF;	/* none */
	smb->Fid = rdata->cfile->netfid;
	smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
	if (wct == 12)
		smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
	smb->Remaining = 0;
	smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
	smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
	if (wct == 12)
		smb->ByteCount = 0;
	else {
		/* old style read */
		struct smb_com_readx_req *smbr =
			(struct smb_com_readx_req *)smb;
		smbr->ByteCount = 0;
	}

	/* 4 for RFC1001 length + 1 for BCC */
	rdata->iov[0].iov_base = smb;
	rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

	rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
			     cifs_readv_receive, cifs_readv_callback,
			     rdata, false);

	if (rc == 0)
		cifs_stats_inc(&tcon->num_reads);

	cifs_small_buf_release(smb);
	return rc;
}

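For orientation, a minimal sketch of how a caller might drive cifs_async_readv(); the allocator name and anything not visible in this diff are assumptions, not part of the patch:

	/* sketch only -- cifs_readdata_alloc() and the error handling are assumed */
	struct cifs_readdata *rdata;

	rdata = cifs_readdata_alloc(nr_pages);	/* assumed helper */
	if (rdata == NULL)
		return -ENOMEM;

	rdata->cfile = open_file;	/* cifsFileInfo of the open file */
	rdata->mapping = mapping;	/* address_space the pages belong to */
	rdata->offset = offset;		/* byte offset to read from */
	rdata->bytes = bytes;		/* total bytes requested */
	rdata->pid = pid;		/* pid placed in the SMB header */

	rc = cifs_async_readv(rdata);	/* on completion, cifs_readv_receive()
					   pulls the payload off the socket and
					   cifs_readv_callback() queues
					   cifs_readv_complete() */
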
int
|
||||
CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
|
||||
char **buf, int *pbuf_type)
|
||||
@ -1836,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
|
||||
|
||||
kref_get(&wdata->refcount);
|
||||
rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
|
||||
cifs_writev_callback, wdata, false);
|
||||
NULL, cifs_writev_callback, wdata, false);
|
||||
|
||||
if (rc == 0)
|
||||
cifs_stats_inc(&tcon->num_writes);
|
||||
@ -1962,10 +2320,50 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
|
||||
return rc;
|
||||
}
|
||||
|
||||
int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
|
||||
const __u8 lock_type, const __u32 num_unlock,
|
||||
const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
|
||||
{
|
||||
int rc = 0;
|
||||
LOCK_REQ *pSMB = NULL;
|
||||
struct kvec iov[2];
|
||||
int resp_buf_type;
|
||||
__u16 count;
|
||||
|
||||
cFYI(1, "cifs_lockv num lock %d num unlock %d", num_lock, num_unlock);
|
||||
|
||||
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
pSMB->Timeout = 0;
|
||||
pSMB->NumberOfLocks = cpu_to_le16(num_lock);
|
||||
pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock);
|
||||
pSMB->LockType = lock_type;
|
||||
pSMB->AndXCommand = 0xFF; /* none */
|
||||
pSMB->Fid = netfid; /* netfid stays le */
|
||||
|
||||
count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
|
||||
inc_rfc1001_len(pSMB, count);
|
||||
pSMB->ByteCount = cpu_to_le16(count);
|
||||
|
||||
iov[0].iov_base = (char *)pSMB;
|
||||
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 -
|
||||
(num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
|
||||
iov[1].iov_base = (char *)buf;
|
||||
iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
|
||||
|
||||
cifs_stats_inc(&tcon->num_locks);
|
||||
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
|
||||
if (rc)
|
||||
cFYI(1, "Send error in cifs_lockv = %d", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
int
|
||||
CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
|
||||
const __u16 smb_file_id, const __u64 len,
|
||||
const __u16 smb_file_id, const __u32 netpid, const __u64 len,
|
||||
const __u64 offset, const __u32 numUnlock,
|
||||
const __u32 numLock, const __u8 lockType,
|
||||
const bool waitFlag, const __u8 oplock_level)
|
||||
@ -2001,7 +2399,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
|
||||
pSMB->Fid = smb_file_id; /* netfid stays le */
|
||||
|
||||
if ((numLock != 0) || (numUnlock != 0)) {
|
||||
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid);
|
||||
pSMB->Locks[0].Pid = cpu_to_le16(netpid);
|
||||
/* BB where to store pid high? */
|
||||
pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
|
||||
pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
|
||||
@ -2035,9 +2433,9 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
|
||||
|
||||
int
|
||||
CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
|
||||
const __u16 smb_file_id, const int get_flag, const __u64 len,
|
||||
struct file_lock *pLockData, const __u16 lock_type,
|
||||
const bool waitFlag)
|
||||
const __u16 smb_file_id, const __u32 netpid, const int get_flag,
|
||||
const __u64 len, struct file_lock *pLockData,
|
||||
const __u16 lock_type, const bool waitFlag)
|
||||
{
|
||||
struct smb_com_transaction2_sfi_req *pSMB = NULL;
|
||||
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
|
||||
@ -2095,7 +2493,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
|
||||
} else
|
||||
pSMB->Timeout = 0;
|
||||
|
||||
parm_data->pid = cpu_to_le32(current->tgid);
|
||||
parm_data->pid = cpu_to_le32(netpid);
|
||||
parm_data->start = cpu_to_le64(pLockData->fl_start);
|
||||
parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
|
||||
|
||||
@ -2812,8 +3210,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
|
||||
pSMB->TotalDataCount = 0;
|
||||
pSMB->MaxParameterCount = cpu_to_le32(2);
|
||||
/* BB find exact data count max from sess structure BB */
|
||||
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
|
||||
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
|
||||
pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
|
||||
pSMB->MaxSetupCount = 4;
|
||||
pSMB->Reserved = 0;
|
||||
pSMB->ParameterOffset = 0;
|
||||
@ -3306,8 +3703,7 @@ smb_init_nttransact(const __u16 sub_command, const int setup_count,
|
||||
pSMB->Reserved = 0;
|
||||
pSMB->TotalParameterCount = cpu_to_le32(parm_len);
|
||||
pSMB->TotalDataCount = 0;
|
||||
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
|
||||
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
|
||||
pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
|
||||
pSMB->ParameterCount = pSMB->TotalParameterCount;
|
||||
pSMB->DataCount = pSMB->TotalDataCount;
|
||||
temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
|
||||
@ -3467,7 +3863,7 @@ qsec_out:
|
||||
|
||||
int
|
||||
CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
|
||||
struct cifs_ntsd *pntsd, __u32 acllen)
|
||||
struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
|
||||
{
|
||||
__u16 byte_count, param_count, data_count, param_offset, data_offset;
|
||||
int rc = 0;
|
||||
@ -3504,7 +3900,7 @@ setCifsAclRetry:
|
||||
|
||||
pSMB->Fid = fid; /* file handle always le */
|
||||
pSMB->Reserved2 = 0;
|
||||
pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL);
|
||||
pSMB->AclFlags = cpu_to_le32(aclflag);
|
||||
|
||||
if (pntsd && acllen) {
|
||||
memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
|
||||
@ -3977,8 +4373,7 @@ findFirstRetry:
|
||||
params = 12 + name_len /* includes null */ ;
|
||||
pSMB->TotalDataCount = 0; /* no EAs */
|
||||
pSMB->MaxParameterCount = cpu_to_le16(10);
|
||||
pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf -
|
||||
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
|
||||
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
|
||||
pSMB->MaxSetupCount = 0;
|
||||
pSMB->Reserved = 0;
|
||||
pSMB->Flags = 0;
|
||||
@ -4052,8 +4447,7 @@ findFirstRetry:
|
||||
psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
|
||||
psrch_inf->entries_in_buffer;
|
||||
lnoff = le16_to_cpu(parms->LastNameOffset);
|
||||
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
|
||||
lnoff) {
|
||||
if (CIFSMaxBufSize < lnoff) {
|
||||
cERROR(1, "ignoring corrupt resume name");
|
||||
psrch_inf->last_entry = NULL;
|
||||
return rc;
|
||||
@ -4097,9 +4491,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
|
||||
byte_count = 0;
|
||||
pSMB->TotalDataCount = 0; /* no EAs */
|
||||
pSMB->MaxParameterCount = cpu_to_le16(8);
|
||||
pSMB->MaxDataCount =
|
||||
cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
|
||||
0xFFFFFF00);
|
||||
pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
|
||||
pSMB->MaxSetupCount = 0;
|
||||
pSMB->Reserved = 0;
|
||||
pSMB->Flags = 0;
|
||||
@ -4181,8 +4573,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
|
||||
psrch_inf->index_of_last_entry +=
|
||||
psrch_inf->entries_in_buffer;
|
||||
lnoff = le16_to_cpu(parms->LastNameOffset);
|
||||
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE <
|
||||
lnoff) {
|
||||
if (CIFSMaxBufSize < lnoff) {
|
||||
cERROR(1, "ignoring corrupt resume name");
|
||||
psrch_inf->last_entry = NULL;
|
||||
return rc;
|
||||
@ -5840,7 +6231,7 @@ QAllEAsRetry:
|
||||
|
||||
if (ea_name) {
|
||||
if (ea_name_len == name_len &&
|
||||
strncmp(ea_name, temp_ptr, name_len) == 0) {
|
||||
memcmp(ea_name, temp_ptr, name_len) == 0) {
|
||||
temp_ptr += name_len + 1;
|
||||
rc = value_len;
|
||||
if (buf_size == 0)
|
||||
@ -6035,12 +6426,7 @@ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
|
||||
pSMB->TotalParameterCount = 0 ;
|
||||
pSMB->TotalDataCount = 0;
|
||||
pSMB->MaxParameterCount = cpu_to_le32(2);
|
||||
/* BB find exact data count max from sess structure BB */
|
||||
pSMB->MaxDataCount = 0; /* same in little endian or be */
|
||||
/* BB VERIFY verify which is correct for above BB */
|
||||
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
|
||||
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
|
||||
|
||||
pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
|
||||
pSMB->MaxSetupCount = 4;
|
||||
pSMB->Reserved = 0;
|
||||
pSMB->ParameterOffset = 0;
|
||||
|
@ -181,7 +181,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
|
||||
-EINVAL = invalid transact2
|
||||
|
||||
*/
|
||||
static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
|
||||
static int check2ndT2(struct smb_hdr *pSMB)
|
||||
{
|
||||
struct smb_t2_rsp *pSMBt;
|
||||
int remaining;
|
||||
@ -214,9 +214,9 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
|
||||
|
||||
cFYI(1, "missing %d bytes from transact2, check next response",
|
||||
remaining);
|
||||
if (total_data_size > maxBufSize) {
|
||||
if (total_data_size > CIFSMaxBufSize) {
|
||||
cERROR(1, "TotalDataSize %d is over maximum buffer %d",
|
||||
total_data_size, maxBufSize);
|
||||
total_data_size, CIFSMaxBufSize);
|
||||
return -EINVAL;
|
||||
}
|
||||
return remaining;
|
||||
@ -320,27 +320,24 @@ requeue_echo:
|
||||
}
|
||||
|
||||
static bool
|
||||
allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
|
||||
bool is_large_buf)
|
||||
allocate_buffers(struct TCP_Server_Info *server)
|
||||
{
|
||||
char *bbuf = *bigbuf, *sbuf = *smallbuf;
|
||||
|
||||
if (bbuf == NULL) {
|
||||
bbuf = (char *)cifs_buf_get();
|
||||
if (!bbuf) {
|
||||
if (!server->bigbuf) {
|
||||
server->bigbuf = (char *)cifs_buf_get();
|
||||
if (!server->bigbuf) {
|
||||
cERROR(1, "No memory for large SMB response");
|
||||
msleep(3000);
|
||||
/* retry will check if exiting */
|
||||
return false;
|
||||
}
|
||||
} else if (is_large_buf) {
|
||||
} else if (server->large_buf) {
|
||||
/* we are reusing a dirty large buf, clear its start */
|
||||
memset(bbuf, 0, size);
|
||||
memset(server->bigbuf, 0, sizeof(struct smb_hdr));
|
||||
}
|
||||
|
||||
if (sbuf == NULL) {
|
||||
sbuf = (char *)cifs_small_buf_get();
|
||||
if (!sbuf) {
|
||||
if (!server->smallbuf) {
|
||||
server->smallbuf = (char *)cifs_small_buf_get();
|
||||
if (!server->smallbuf) {
|
||||
cERROR(1, "No memory for SMB response");
|
||||
msleep(1000);
|
||||
/* retry will check if exiting */
|
||||
@ -349,36 +346,116 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
|
||||
/* beginning of smb buffer is cleared in our buf_get */
|
||||
} else {
|
||||
/* if existing small buf clear beginning */
|
||||
memset(sbuf, 0, size);
|
||||
memset(server->smallbuf, 0, sizeof(struct smb_hdr));
|
||||
}
|
||||
|
||||
*bigbuf = bbuf;
|
||||
*smallbuf = sbuf;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
static int
|
||||
read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
|
||||
struct kvec *iov, unsigned int to_read,
|
||||
unsigned int *ptotal_read, bool is_header_read)
|
||||
static bool
|
||||
server_unresponsive(struct TCP_Server_Info *server)
|
||||
{
|
||||
int length, rc = 0;
|
||||
unsigned int total_read;
|
||||
char *buf = iov->iov_base;
|
||||
if (echo_retries > 0 && server->tcpStatus == CifsGood &&
|
||||
time_after(jiffies, server->lstrp +
|
||||
(echo_retries * SMB_ECHO_INTERVAL))) {
|
||||
cERROR(1, "Server %s has not responded in %d seconds. "
|
||||
"Reconnecting...", server->hostname,
|
||||
(echo_retries * SMB_ECHO_INTERVAL / HZ));
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
 * kvec_array_init - clone a kvec array, and advance into it
 * @new: pointer to memory for cloned array
 * @iov: pointer to original array
 * @nr_segs: number of members in original array
 * @bytes: number of bytes to advance into the cloned array
 *
 * This function will copy the array provided in iov to a section of memory
 * and advance the specified number of bytes into the new array. It returns
 * the number of segments in the new array. "new" must be at least as big as
 * the original iov array.
 */
static unsigned int
kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
		size_t bytes)
{
	size_t base = 0;

	while (bytes || !iov->iov_len) {
		int copy = min(bytes, iov->iov_len);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			nr_segs--;
			base = 0;
		}
	}
	memcpy(new, iov, sizeof(*iov) * nr_segs);
	new->iov_base += base;
	new->iov_len -= base;
	return nr_segs;
}
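
A small usage sketch of kvec_array_init(); the buffers and byte counts are invented for illustration only:

	/* two original segments, 6 of the 12 bytes already received */
	struct kvec orig[2] = {
		{ .iov_base = hdr,  .iov_len = 4 },	/* hdr/data: caller buffers */
		{ .iov_base = data, .iov_len = 8 },
	};
	struct kvec clone[2];
	unsigned int segs = kvec_array_init(clone, orig, 2, 6);
	/* segs == 1; clone[0].iov_base == data + 2, clone[0].iov_len == 6,
	 * i.e. exactly the bytes that still need to be read */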
|
||||
|
||||
static struct kvec *
|
||||
get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
|
||||
{
|
||||
struct kvec *new_iov;
|
||||
|
||||
if (server->iov && nr_segs <= server->nr_iov)
|
||||
return server->iov;
|
||||
|
||||
/* not big enough -- allocate a new one and release the old */
|
||||
new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
|
||||
if (new_iov) {
|
||||
kfree(server->iov);
|
||||
server->iov = new_iov;
|
||||
server->nr_iov = nr_segs;
|
||||
}
|
||||
return new_iov;
|
||||
}
|
||||
|
||||
int
|
||||
cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
|
||||
unsigned int nr_segs, unsigned int to_read)
|
||||
{
|
||||
int length = 0;
|
||||
int total_read;
|
||||
unsigned int segs;
|
||||
struct msghdr smb_msg;
|
||||
struct kvec *iov;
|
||||
|
||||
iov = get_server_iovec(server, nr_segs);
|
||||
if (!iov)
|
||||
return -ENOMEM;
|
||||
|
||||
smb_msg.msg_control = NULL;
|
||||
smb_msg.msg_controllen = 0;
|
||||
|
||||
for (total_read = 0; to_read; total_read += length, to_read -= length) {
|
||||
if (server_unresponsive(server)) {
|
||||
total_read = -EAGAIN;
|
||||
break;
|
||||
}
|
||||
|
||||
segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
|
||||
|
||||
length = kernel_recvmsg(server->ssocket, &smb_msg,
|
||||
iov, segs, to_read, 0);
|
||||
|
||||
for (total_read = 0; total_read < to_read; total_read += length) {
|
||||
length = kernel_recvmsg(server->ssocket, smb_msg, iov, 1,
|
||||
to_read - total_read, 0);
|
||||
if (server->tcpStatus == CifsExiting) {
|
||||
/* then will exit */
|
||||
rc = 2;
|
||||
total_read = -ESHUTDOWN;
|
||||
break;
|
||||
} else if (server->tcpStatus == CifsNeedReconnect) {
|
||||
cifs_reconnect(server);
|
||||
/* Reconnect wakes up rspns q */
|
||||
/* Now we will reread sock */
|
||||
rc = 1;
|
||||
total_read = -EAGAIN;
|
||||
break;
|
||||
} else if (length == -ERESTARTSYS ||
|
||||
length == -EAGAIN ||
|
||||
@ -390,56 +467,54 @@ read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
|
||||
*/
|
||||
usleep_range(1000, 2000);
|
||||
length = 0;
|
||||
if (!is_header_read)
|
||||
continue;
|
||||
/* Special handling for header read */
|
||||
if (total_read) {
|
||||
iov->iov_base = (to_read - total_read) +
|
||||
buf;
|
||||
iov->iov_len = to_read - total_read;
|
||||
smb_msg->msg_control = NULL;
|
||||
smb_msg->msg_controllen = 0;
|
||||
rc = 3;
|
||||
} else
|
||||
rc = 1;
|
||||
break;
|
||||
continue;
|
||||
} else if (length <= 0) {
|
||||
cERROR(1, "Received no data, expecting %d",
|
||||
to_read - total_read);
|
||||
cFYI(1, "Received no data or error: expecting %d "
|
||||
"got %d", to_read, length);
|
||||
cifs_reconnect(server);
|
||||
rc = 1;
|
||||
total_read = -EAGAIN;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return total_read;
|
||||
}
|
||||
|
||||
*ptotal_read = total_read;
|
||||
return rc;
|
||||
int
|
||||
cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
|
||||
unsigned int to_read)
|
||||
{
|
||||
struct kvec iov;
|
||||
|
||||
iov.iov_base = buf;
|
||||
iov.iov_len = to_read;
|
||||
|
||||
return cifs_readv_from_socket(server, &iov, 1, to_read);
|
||||
}
|
||||
|
||||
static bool
|
||||
check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
|
||||
is_smb_response(struct TCP_Server_Info *server, unsigned char type)
|
||||
{
|
||||
char temp = *buf;
|
||||
unsigned int pdu_length = be32_to_cpu(
|
||||
((struct smb_hdr *)buf)->smb_buf_length);
|
||||
|
||||
/*
|
||||
* The first byte big endian of the length field,
|
||||
* is actually not part of the length but the type
|
||||
* with the most common, zero, as regular data.
|
||||
*/
|
||||
if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) {
|
||||
return false;
|
||||
} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) {
|
||||
cFYI(1, "Good RFC 1002 session rsp");
|
||||
return false;
|
||||
} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) {
|
||||
switch (type) {
|
||||
case RFC1002_SESSION_MESSAGE:
|
||||
/* Regular SMB response */
|
||||
return true;
|
||||
case RFC1002_SESSION_KEEP_ALIVE:
|
||||
cFYI(1, "RFC 1002 session keep alive");
|
||||
break;
|
||||
case RFC1002_POSITIVE_SESSION_RESPONSE:
|
||||
cFYI(1, "RFC 1002 positive session response");
|
||||
break;
|
||||
case RFC1002_NEGATIVE_SESSION_RESPONSE:
|
||||
/*
|
||||
* We get this from Windows 98 instead of an error on
|
||||
* SMB negprot response.
|
||||
*/
|
||||
cFYI(1, "Negative RFC1002 Session Response Error 0x%x)",
|
||||
pdu_length);
|
||||
cFYI(1, "RFC 1002 negative session response");
|
||||
/* give server a second to clean up */
|
||||
msleep(1000);
|
||||
/*
|
||||
@ -448,87 +523,89 @@ check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
|
||||
* is since we do not begin with RFC1001 session
|
||||
* initialize frame).
|
||||
*/
|
||||
cifs_set_port((struct sockaddr *)
|
||||
&server->dstaddr, CIFS_PORT);
|
||||
cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
return false;
|
||||
} else if (temp != (char) 0) {
|
||||
cERROR(1, "Unknown RFC 1002 frame");
|
||||
cifs_dump_mem(" Received Data: ", buf, 4);
|
||||
break;
|
||||
default:
|
||||
cERROR(1, "RFC 1002 unknown response type 0x%x", type);
|
||||
cifs_reconnect(server);
|
||||
return false;
|
||||
}
|
||||
|
||||
/* else we have an SMB response */
|
||||
if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
|
||||
(pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
|
||||
cERROR(1, "Invalid size SMB length %d pdu_length %d",
|
||||
4, pdu_length+4);
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
static struct mid_q_entry *
|
||||
find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf,
|
||||
int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
|
||||
find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
|
||||
{
|
||||
struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL;
|
||||
struct mid_q_entry *mid;
|
||||
|
||||
spin_lock(&GlobalMid_Lock);
|
||||
list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) {
|
||||
if (mid->mid != buf->Mid ||
|
||||
mid->midState != MID_REQUEST_SUBMITTED ||
|
||||
mid->command != buf->Command)
|
||||
continue;
|
||||
|
||||
if (*length == 0 && check2ndT2(buf, server->maxBuf) > 0) {
|
||||
/* We have a multipart transact2 resp */
|
||||
*is_multi_rsp = true;
|
||||
if (mid->resp_buf) {
|
||||
/* merge response - fix up 1st*/
|
||||
*length = coalesce_t2(buf, mid->resp_buf);
|
||||
if (*length > 0) {
|
||||
*length = 0;
|
||||
mid->multiRsp = true;
|
||||
break;
|
||||
}
|
||||
/* All parts received or packet is malformed. */
|
||||
mid->multiEnd = true;
|
||||
goto multi_t2_fnd;
|
||||
}
|
||||
if (!is_large_buf) {
|
||||
/*FIXME: switch to already allocated largebuf?*/
|
||||
cERROR(1, "1st trans2 resp needs bigbuf");
|
||||
} else {
|
||||
/* Have first buffer */
|
||||
mid->resp_buf = buf;
|
||||
mid->largeBuf = true;
|
||||
*bigbuf = NULL;
|
||||
}
|
||||
break;
|
||||
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
|
||||
if (mid->mid == buf->Mid &&
|
||||
mid->midState == MID_REQUEST_SUBMITTED &&
|
||||
mid->command == buf->Command) {
|
||||
spin_unlock(&GlobalMid_Lock);
|
||||
return mid;
|
||||
}
|
||||
mid->resp_buf = buf;
|
||||
mid->largeBuf = is_large_buf;
|
||||
multi_t2_fnd:
|
||||
if (*length == 0)
|
||||
mid->midState = MID_RESPONSE_RECEIVED;
|
||||
else
|
||||
mid->midState = MID_RESPONSE_MALFORMED;
|
||||
#ifdef CONFIG_CIFS_STATS2
|
||||
mid->when_received = jiffies;
|
||||
#endif
|
||||
list_del_init(&mid->qhead);
|
||||
ret = mid;
|
||||
break;
|
||||
}
|
||||
spin_unlock(&GlobalMid_Lock);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return ret;
|
||||
void
|
||||
dequeue_mid(struct mid_q_entry *mid, bool malformed)
|
||||
{
|
||||
#ifdef CONFIG_CIFS_STATS2
|
||||
mid->when_received = jiffies;
|
||||
#endif
|
||||
spin_lock(&GlobalMid_Lock);
|
||||
if (!malformed)
|
||||
mid->midState = MID_RESPONSE_RECEIVED;
|
||||
else
|
||||
mid->midState = MID_RESPONSE_MALFORMED;
|
||||
list_del_init(&mid->qhead);
|
||||
spin_unlock(&GlobalMid_Lock);
|
||||
}
|
||||
|
||||
static void
|
||||
handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
|
||||
struct smb_hdr *buf, int malformed)
|
||||
{
|
||||
if (malformed == 0 && check2ndT2(buf) > 0) {
|
||||
mid->multiRsp = true;
|
||||
if (mid->resp_buf) {
|
||||
/* merge response - fix up 1st*/
|
||||
malformed = coalesce_t2(buf, mid->resp_buf);
|
||||
if (malformed > 0)
|
||||
return;
|
||||
|
||||
/* All parts received or packet is malformed. */
|
||||
mid->multiEnd = true;
|
||||
return dequeue_mid(mid, malformed);
|
||||
}
|
||||
if (!server->large_buf) {
|
||||
/*FIXME: switch to already allocated largebuf?*/
|
||||
cERROR(1, "1st trans2 resp needs bigbuf");
|
||||
} else {
|
||||
/* Have first buffer */
|
||||
mid->resp_buf = buf;
|
||||
mid->largeBuf = true;
|
||||
server->bigbuf = NULL;
|
||||
}
|
||||
return;
|
||||
}
|
||||
mid->resp_buf = buf;
|
||||
mid->largeBuf = server->large_buf;
|
||||
/* Was previous buf put in mpx struct for multi-rsp? */
|
||||
if (!mid->multiRsp) {
|
||||
/* smb buffer will be freed by user thread */
|
||||
if (server->large_buf)
|
||||
server->bigbuf = NULL;
|
||||
else
|
||||
server->smallbuf = NULL;
|
||||
}
|
||||
dequeue_mid(mid, malformed);
|
||||
}
|
||||
|
||||
static void clean_demultiplex_info(struct TCP_Server_Info *server)
|
||||
@ -618,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
|
||||
}
|
||||
|
||||
kfree(server->hostname);
|
||||
kfree(server->iov);
|
||||
kfree(server);
|
||||
|
||||
length = atomic_dec_return(&tcpSesAllocCount);
|
||||
@ -626,21 +704,71 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
|
||||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
static int
|
||||
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
|
||||
{
|
||||
int length;
|
||||
char *buf = server->smallbuf;
|
||||
struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
|
||||
unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
|
||||
|
||||
/* make sure this will fit in a large buffer */
|
||||
if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
|
||||
cERROR(1, "SMB response too long (%u bytes)",
|
||||
pdu_length);
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
return -EAGAIN;
|
||||
}
|
||||
|
||||
/* switch to large buffer if too big for a small one */
|
||||
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
|
||||
server->large_buf = true;
|
||||
memcpy(server->bigbuf, server->smallbuf, server->total_read);
|
||||
buf = server->bigbuf;
|
||||
smb_buffer = (struct smb_hdr *)buf;
|
||||
}
|
||||
|
||||
/* now read the rest */
|
||||
length = cifs_read_from_socket(server,
|
||||
buf + sizeof(struct smb_hdr) - 1,
|
||||
pdu_length - sizeof(struct smb_hdr) + 1 + 4);
|
||||
if (length < 0)
|
||||
return length;
|
||||
server->total_read += length;
|
||||
|
||||
dump_smb(smb_buffer, server->total_read);
|
||||
|
||||
/*
|
||||
* We know that we received enough to get to the MID as we
|
||||
* checked the pdu_length earlier. Now check to see
|
||||
* if the rest of the header is OK. We borrow the length
|
||||
* var for the rest of the loop to avoid a new stack var.
|
||||
*
|
||||
* 48 bytes is enough to display the header and a little bit
|
||||
* into the payload for debugging purposes.
|
||||
*/
|
||||
length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
|
||||
if (length != 0)
|
||||
cifs_dump_mem("Bad SMB: ", buf,
|
||||
min_t(unsigned int, server->total_read, 48));
|
||||
|
||||
if (mid)
|
||||
handle_mid(mid, server, smb_buffer, length);
|
||||
|
||||
return length;
|
||||
}
|
||||
|
||||
static int
|
||||
cifs_demultiplex_thread(void *p)
|
||||
{
|
||||
int length;
|
||||
struct TCP_Server_Info *server = p;
|
||||
unsigned int pdu_length, total_read;
|
||||
char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL;
|
||||
unsigned int pdu_length;
|
||||
char *buf = NULL;
|
||||
struct smb_hdr *smb_buffer = NULL;
|
||||
struct msghdr smb_msg;
|
||||
struct kvec iov;
|
||||
struct task_struct *task_to_wake = NULL;
|
||||
struct mid_q_entry *mid_entry;
|
||||
bool isLargeBuf = false;
|
||||
bool isMultiRsp = false;
|
||||
int rc;
|
||||
|
||||
current->flags |= PF_MEMALLOC;
|
||||
cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
|
||||
@ -655,111 +783,65 @@ cifs_demultiplex_thread(void *p)
|
||||
if (try_to_freeze())
|
||||
continue;
|
||||
|
||||
if (!allocate_buffers(&bigbuf, &smallbuf,
|
||||
sizeof(struct smb_hdr), isLargeBuf))
|
||||
if (!allocate_buffers(server))
|
||||
continue;
|
||||
|
||||
isLargeBuf = false;
|
||||
isMultiRsp = false;
|
||||
smb_buffer = (struct smb_hdr *)smallbuf;
|
||||
buf = smallbuf;
|
||||
iov.iov_base = buf;
|
||||
iov.iov_len = 4;
|
||||
smb_msg.msg_control = NULL;
|
||||
smb_msg.msg_controllen = 0;
|
||||
server->large_buf = false;
|
||||
smb_buffer = (struct smb_hdr *)server->smallbuf;
|
||||
buf = server->smallbuf;
|
||||
pdu_length = 4; /* enough to get RFC1001 header */
|
||||
|
||||
incomplete_rcv:
|
||||
if (echo_retries > 0 && server->tcpStatus == CifsGood &&
|
||||
time_after(jiffies, server->lstrp +
|
||||
(echo_retries * SMB_ECHO_INTERVAL))) {
|
||||
cERROR(1, "Server %s has not responded in %d seconds. "
|
||||
"Reconnecting...", server->hostname,
|
||||
(echo_retries * SMB_ECHO_INTERVAL / HZ));
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
continue;
|
||||
}
|
||||
|
||||
rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
|
||||
&total_read, true /* header read */);
|
||||
if (rc == 3)
|
||||
goto incomplete_rcv;
|
||||
else if (rc == 2)
|
||||
break;
|
||||
else if (rc == 1)
|
||||
length = cifs_read_from_socket(server, buf, pdu_length);
|
||||
if (length < 0)
|
||||
continue;
|
||||
server->total_read = length;
|
||||
|
||||
/*
|
||||
* The right amount was read from socket - 4 bytes,
|
||||
* so we can now interpret the length field.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Note that RFC 1001 length is big endian on the wire,
|
||||
* but we convert it here so it is always manipulated
|
||||
* as host byte order.
|
||||
*/
|
||||
pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
|
||||
|
||||
cFYI(1, "rfc1002 length 0x%x", pdu_length+4);
|
||||
if (!check_rfc1002_header(server, buf))
|
||||
cFYI(1, "RFC1002 header 0x%x", pdu_length);
|
||||
if (!is_smb_response(server, buf[0]))
|
||||
continue;
|
||||
|
||||
/* else length ok */
|
||||
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
|
||||
isLargeBuf = true;
|
||||
memcpy(bigbuf, smallbuf, 4);
|
||||
smb_buffer = (struct smb_hdr *)bigbuf;
|
||||
buf = bigbuf;
|
||||
/* make sure we have enough to get to the MID */
|
||||
if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
|
||||
cERROR(1, "SMB response too short (%u bytes)",
|
||||
pdu_length);
|
||||
cifs_reconnect(server);
|
||||
wake_up(&server->response_q);
|
||||
continue;
|
||||
}
|
||||
|
||||
iov.iov_base = 4 + buf;
|
||||
iov.iov_len = pdu_length;
|
||||
rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
|
||||
&total_read, false);
|
||||
if (rc == 2)
|
||||
break;
|
||||
else if (rc == 1)
|
||||
/* read down to the MID */
|
||||
length = cifs_read_from_socket(server, buf + 4,
|
||||
sizeof(struct smb_hdr) - 1 - 4);
|
||||
if (length < 0)
|
||||
continue;
|
||||
server->total_read += length;
|
||||
|
||||
mid_entry = find_mid(server, smb_buffer);
|
||||
|
||||
if (!mid_entry || !mid_entry->receive)
|
||||
length = standard_receive3(server, mid_entry);
|
||||
else
|
||||
length = mid_entry->receive(server, mid_entry);
|
||||
|
||||
if (length < 0)
|
||||
continue;
|
||||
|
||||
total_read += 4; /* account for rfc1002 hdr */
|
||||
|
||||
dump_smb(smb_buffer, total_read);
|
||||
|
||||
/*
|
||||
* We know that we received enough to get to the MID as we
|
||||
* checked the pdu_length earlier. Now check to see
|
||||
* if the rest of the header is OK. We borrow the length
|
||||
* var for the rest of the loop to avoid a new stack var.
|
||||
*
|
||||
* 48 bytes is enough to display the header and a little bit
|
||||
* into the payload for debugging purposes.
|
||||
*/
|
||||
length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
|
||||
if (length != 0)
|
||||
cifs_dump_mem("Bad SMB: ", buf,
|
||||
min_t(unsigned int, total_read, 48));
|
||||
if (server->large_buf) {
|
||||
buf = server->bigbuf;
|
||||
smb_buffer = (struct smb_hdr *)buf;
|
||||
}
|
||||
|
||||
server->lstrp = jiffies;
|
||||
|
||||
mid_entry = find_cifs_mid(server, smb_buffer, &length,
|
||||
isLargeBuf, &isMultiRsp, &bigbuf);
|
||||
if (mid_entry != NULL) {
|
||||
mid_entry->callback(mid_entry);
|
||||
/* Was previous buf put in mpx struct for multi-rsp? */
|
||||
if (!isMultiRsp) {
|
||||
/* smb buffer will be freed by user thread */
|
||||
if (isLargeBuf)
|
||||
bigbuf = NULL;
|
||||
else
|
||||
smallbuf = NULL;
|
||||
}
|
||||
} else if (length != 0) {
|
||||
/* response sanity checks failed */
|
||||
continue;
|
||||
} else if (!is_valid_oplock_break(smb_buffer, server) &&
|
||||
!isMultiRsp) {
|
||||
if (!mid_entry->multiRsp || mid_entry->multiEnd)
|
||||
mid_entry->callback(mid_entry);
|
||||
} else if (!is_valid_oplock_break(smb_buffer, server)) {
|
||||
cERROR(1, "No task to wake, unknown frame received! "
|
||||
"NumMids %d", atomic_read(&midCount));
|
||||
cifs_dump_mem("Received Data is: ", buf,
|
||||
@ -773,9 +855,9 @@ incomplete_rcv:
|
||||
} /* end while !EXITING */
|
||||
|
||||
/* buffer usually freed in free_mid - need to free it here on exit */
|
||||
cifs_buf_release(bigbuf);
|
||||
if (smallbuf) /* no sense logging a debug message if NULL */
|
||||
cifs_small_buf_release(smallbuf);
|
||||
cifs_buf_release(server->bigbuf);
|
||||
if (server->smallbuf) /* no sense logging a debug message if NULL */
|
||||
cifs_small_buf_release(server->smallbuf);
|
||||
|
||||
task_to_wake = xchg(&server->tsk, NULL);
|
||||
clean_demultiplex_info(server);
|
||||
@ -827,6 +909,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
|
||||
{
|
||||
char *value, *data, *end;
|
||||
char *mountdata_copy = NULL, *options;
|
||||
int err;
|
||||
unsigned int temp_len, i, j;
|
||||
char separator[2];
|
||||
short int override_uid = -1;
|
||||
@ -883,6 +966,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
|
||||
cFYI(1, "Null separator not allowed");
|
||||
}
|
||||
}
|
||||
vol->backupuid_specified = false; /* no backup intent for a user */
|
||||
vol->backupgid_specified = false; /* no backup intent for a group */
|
||||
|
||||
while ((data = strsep(&options, separator)) != NULL) {
|
||||
if (!*data)
|
||||
@ -1442,6 +1527,22 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
|
||||
vol->mfsymlinks = true;
|
||||
} else if (strnicmp(data, "multiuser", 8) == 0) {
|
||||
vol->multiuser = true;
|
||||
} else if (!strnicmp(data, "backupuid", 9) && value && *value) {
|
||||
err = kstrtouint(value, 0, &vol->backupuid);
|
||||
if (err < 0) {
|
||||
cERROR(1, "%s: Invalid backupuid value",
|
||||
__func__);
|
||||
goto cifs_parse_mount_err;
|
||||
}
|
||||
vol->backupuid_specified = true;
|
||||
} else if (!strnicmp(data, "backupgid", 9) && value && *value) {
|
||||
err = kstrtouint(value, 0, &vol->backupgid);
|
||||
if (err < 0) {
|
||||
cERROR(1, "%s: Invalid backupgid value",
|
||||
__func__);
|
||||
goto cifs_parse_mount_err;
|
||||
}
|
||||
vol->backupgid_specified = true;
|
||||
} else
|
||||
printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
|
||||
data);
|
||||
@ -2209,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
|
||||
(new->mnt_cifs_flags & CIFS_MOUNT_MASK))
|
||||
return 0;
|
||||
|
||||
if (old->rsize != new->rsize)
|
||||
return 0;
|
||||
|
||||
/*
|
||||
* We want to share sb only if we don't specify wsize or specified wsize
|
||||
* is greater or equal than existing one.
|
||||
* We want to share sb only if we don't specify an r/wsize or
|
||||
* specified r/wsize is greater than or equal to existing one.
|
||||
*/
|
||||
if (new->wsize && new->wsize < old->wsize)
|
||||
return 0;
|
||||
|
||||
if (new->rsize && new->rsize < old->rsize)
|
||||
return 0;
|
||||
|
||||
if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
|
||||
return 0;
|
||||
|
||||
@ -2656,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
|
||||
CIFS_MOUNT_POSIX_PATHS;
|
||||
}
|
||||
|
||||
if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
|
||||
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
|
||||
cifs_sb->rsize = 127 * 1024;
|
||||
cFYI(DBG2, "larger reads not supported by srv");
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
cFYI(1, "Negotiate caps 0x%x", (int)cap);
|
||||
#ifdef CONFIG_CIFS_DEBUG2
|
||||
if (cap & CIFS_UNIX_FCNTL_CAP)
|
||||
@ -2708,31 +2801,19 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
|
||||
spin_lock_init(&cifs_sb->tlink_tree_lock);
|
||||
cifs_sb->tlink_tree = RB_ROOT;
|
||||
|
||||
if (pvolume_info->rsize > CIFSMaxBufSize) {
|
||||
cERROR(1, "rsize %d too large, using MaxBufSize",
|
||||
pvolume_info->rsize);
|
||||
cifs_sb->rsize = CIFSMaxBufSize;
|
||||
} else if ((pvolume_info->rsize) &&
|
||||
(pvolume_info->rsize <= CIFSMaxBufSize))
|
||||
cifs_sb->rsize = pvolume_info->rsize;
|
||||
else /* default */
|
||||
cifs_sb->rsize = CIFSMaxBufSize;
|
||||
|
||||
if (cifs_sb->rsize < 2048) {
|
||||
cifs_sb->rsize = 2048;
|
||||
/* Windows ME may prefer this */
|
||||
cFYI(1, "readsize set to minimum: 2048");
|
||||
}
|
||||
|
||||
/*
|
||||
* Temporarily set wsize for matching superblock. If we end up using
|
||||
* new sb then cifs_negotiate_wsize will later negotiate it downward
|
||||
* if needed.
|
||||
* Temporarily set r/wsize for matching superblock. If we end up using
|
||||
* new sb then client will later negotiate it downward if needed.
|
||||
*/
|
||||
cifs_sb->rsize = pvolume_info->rsize;
|
||||
cifs_sb->wsize = pvolume_info->wsize;
|
||||
|
||||
cifs_sb->mnt_uid = pvolume_info->linux_uid;
|
||||
cifs_sb->mnt_gid = pvolume_info->linux_gid;
|
||||
if (pvolume_info->backupuid_specified)
|
||||
cifs_sb->mnt_backupuid = pvolume_info->backupuid;
|
||||
if (pvolume_info->backupgid_specified)
|
||||
cifs_sb->mnt_backupgid = pvolume_info->backupgid;
|
||||
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
|
||||
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
|
||||
cFYI(1, "file mode: 0x%x dir mode: 0x%x",
|
||||
@ -2763,6 +2844,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
|
||||
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
|
||||
if (pvolume_info->cifs_acl)
|
||||
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
|
||||
if (pvolume_info->backupuid_specified)
|
||||
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
|
||||
if (pvolume_info->backupgid_specified)
|
||||
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
|
||||
if (pvolume_info->override_uid)
|
||||
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
|
||||
if (pvolume_info->override_gid)
|
||||
@ -2795,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
|
||||
}
|
||||
|
||||
/*
|
||||
* When the server supports very large writes via POSIX extensions, we can
|
||||
* allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including
|
||||
* the RFC1001 length.
|
||||
* When the server supports very large reads and writes via POSIX extensions,
|
||||
* we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
|
||||
* including the RFC1001 length.
|
||||
*
|
||||
* Note that this might make for "interesting" allocation problems during
|
||||
* writeback however as we have to allocate an array of pointers for the
|
||||
* pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
|
||||
*
|
||||
* For reads, there is a similar problem as we need to allocate an array
|
||||
* of kvecs to handle the receive, though that should only need to be done
|
||||
* once.
|
||||
*/
|
||||
#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
|
||||
#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
|
||||
|
||||
/*
|
||||
* When the server doesn't allow large posix writes, only allow a wsize of
|
||||
* 128k minus the size of the WRITE_AND_X header. That allows for a write up
|
||||
* to the maximum size described by RFC1002.
|
||||
* When the server doesn't allow large posix writes, only allow a rsize/wsize
|
||||
* of 2^17-1 minus the size of the call header. That allows for a read or
|
||||
* write up to the maximum size described by RFC1002.
|
||||
*/
|
||||
#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4)
|
||||
#define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
|
||||
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
|
||||
|
||||
/*
|
||||
* The default wsize is 1M. find_get_pages seems to return a maximum of 256
|
||||
* pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
|
||||
* a single wsize request with a single call.
|
||||
*/
|
||||
#define CIFS_DEFAULT_WSIZE (1024 * 1024)
|
||||
#define CIFS_DEFAULT_IOSIZE (1024 * 1024)
|
||||
|
||||
/*
|
||||
* Windows only supports a max of 60k reads. Default to that when posix
|
||||
* extensions aren't in force.
|
||||
*/
|
||||
#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
|
||||
|
||||
static unsigned int
|
||||
cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
|
||||
@ -2825,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
|
||||
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
|
||||
struct TCP_Server_Info *server = tcon->ses->server;
|
||||
unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
|
||||
CIFS_DEFAULT_WSIZE;
|
||||
CIFS_DEFAULT_IOSIZE;
|
||||
|
||||
/* can server support 24-bit write sizes? (via UNIX extensions) */
|
||||
if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
|
||||
@ -2848,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
|
||||
return wsize;
|
||||
}
|
||||
|
||||
static unsigned int
|
||||
cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
|
||||
{
|
||||
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
|
||||
struct TCP_Server_Info *server = tcon->ses->server;
|
||||
unsigned int rsize, defsize;
|
||||
|
||||
/*
|
||||
* Set default value...
|
||||
*
|
||||
* HACK alert! Ancient servers have very small buffers. Even though
|
||||
* MS-CIFS indicates that servers are only limited by the client's
|
||||
* bufsize for reads, testing against win98se shows that it throws
|
||||
* INVALID_PARAMETER errors if you try to request too large a read.
|
||||
*
|
||||
* If the server advertises a MaxBufferSize of less than one page,
|
||||
* assume that it also can't satisfy reads larger than that either.
|
||||
*
|
||||
* FIXME: Is there a better heuristic for this?
|
||||
*/
|
||||
if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
|
||||
defsize = CIFS_DEFAULT_IOSIZE;
|
||||
else if (server->capabilities & CAP_LARGE_READ_X)
|
||||
defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
|
||||
else if (server->maxBuf >= PAGE_CACHE_SIZE)
|
||||
defsize = CIFSMaxBufSize;
|
||||
else
|
||||
defsize = server->maxBuf - sizeof(READ_RSP);
|
||||
|
||||
rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
|
||||
|
||||
/*
|
||||
* no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
|
||||
* the client's MaxBufferSize.
|
||||
*/
|
||||
if (!(server->capabilities & CAP_LARGE_READ_X))
|
||||
rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
|
||||
|
||||
/* hard limit of CIFS_MAX_RSIZE */
|
||||
rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
|
||||
|
||||
return rsize;
|
||||
}
|
||||
|
||||
static int
|
||||
is_path_accessible(int xid, struct cifs_tcon *tcon,
|
||||
struct cifs_sb_info *cifs_sb, const char *full_path)
|
||||
@ -3041,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname)
|
||||
return volume_info;
|
||||
}
|
||||
|
||||
/* make sure ra_pages is a multiple of rsize */
|
||||
static inline unsigned int
|
||||
cifs_ra_pages(struct cifs_sb_info *cifs_sb)
|
||||
{
|
||||
unsigned int reads;
|
||||
unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
|
||||
|
||||
if (rsize_pages >= default_backing_dev_info.ra_pages)
|
||||
return default_backing_dev_info.ra_pages;
|
||||
else if (rsize_pages == 0)
|
||||
return rsize_pages;
|
||||
|
||||
reads = default_backing_dev_info.ra_pages / rsize_pages;
|
||||
return reads * rsize_pages;
|
||||
}
|
||||
|
||||
int
|
||||
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
|
||||
{
|
||||
@ -3059,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
|
||||
|
||||
#ifdef CONFIG_CIFS_DFS_UPCALL
|
||||
try_mount_again:
|
||||
/* cleanup activities if we're chasing a referral */
|
||||
@ -3125,15 +3280,11 @@ try_mount_again:
|
||||
CIFSSMBQFSAttributeInfo(xid, tcon);
|
||||
}
|
||||
|
||||
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
|
||||
cifs_sb->rsize = 1024 * 127;
|
||||
cFYI(DBG2, "no very large read support, rsize now 127K");
|
||||
}
|
||||
if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
|
||||
cifs_sb->rsize = min(cifs_sb->rsize,
|
||||
(tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
|
||||
|
||||
cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
|
||||
cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
|
||||
|
||||
/* tune readahead according to rsize */
|
||||
cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);
|
||||
|
||||
remote_path_check:
|
||||
#ifdef CONFIG_CIFS_DFS_UPCALL
|
||||
|
@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
|
||||
}
|
||||
tcon = tlink_tcon(tlink);
|
||||
|
||||
if (oplockEnabled)
|
||||
if (enable_oplocks)
|
||||
oplock = REQ_OPLOCK;
|
||||
|
||||
if (nd)
|
||||
@ -244,6 +244,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
|
||||
if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
|
||||
create_options |= CREATE_OPTION_READONLY;
|
||||
|
||||
if (backup_cred(cifs_sb))
|
||||
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
||||
|
||||
if (tcon->ses->capabilities & CAP_NT_SMBS)
|
||||
rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
|
||||
desiredAccess, create_options,
|
||||
@ -357,6 +360,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
|
||||
{
|
||||
int rc = -EPERM;
|
||||
int xid;
|
||||
int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
|
||||
struct cifs_sb_info *cifs_sb;
|
||||
struct tcon_link *tlink;
|
||||
struct cifs_tcon *pTcon;
|
||||
@ -431,9 +435,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* FIXME: would WRITE_OWNER | WRITE_DAC be better? */
|
||||
if (backup_cred(cifs_sb))
|
||||
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
||||
|
||||
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
|
||||
GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL,
|
||||
GENERIC_WRITE, create_options,
|
||||
&fileHandle, &oplock, buf, cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
if (rc)
|
||||
@ -642,8 +648,16 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
|
||||
if (direntry->d_inode) {
|
||||
if (cifs_revalidate_dentry(direntry))
|
||||
return 0;
|
||||
else
|
||||
else {
|
||||
/*
|
||||
* Forcibly invalidate automounting directory inodes
|
||||
* (remote DFS directories) so to have them
|
||||
* instantiated again for automount
|
||||
*/
|
||||
if (IS_AUTOMOUNT(direntry->d_inode))
|
||||
return 0;
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -45,7 +45,7 @@
|
||||
#include "cifs_debug.h"
|
||||
#include "cifsfs.h"
|
||||
|
||||
#ifdef CIFS_NFSD_EXPORT
|
||||
#ifdef CONFIG_CIFS_NFSD_EXPORT
|
||||
static struct dentry *cifs_get_parent(struct dentry *dentry)
|
||||
{
|
||||
/* BB need to add code here eventually to enable export via NFSD */
|
||||
@ -63,5 +63,5 @@ const struct export_operations cifs_export_ops = {
|
||||
.encode_fs = */
|
||||
};
|
||||
|
||||
#endif /* CIFS_NFSD_EXPORT */
|
||||
#endif /* CONFIG_CIFS_NFSD_EXPORT */
|
||||
|
||||
|
fs/cifs/file.c (1164 changes): file diff suppressed because it is too large
@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp)
|
||||
|
||||
xid = GetXid();
|
||||
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
|
||||
if (rc == -EOPNOTSUPP || rc == -EINVAL) {
|
||||
switch (rc) {
|
||||
case 0:
|
||||
cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
|
||||
break;
|
||||
case -EREMOTE:
|
||||
cifs_create_dfs_fattr(&fattr, inode->i_sb);
|
||||
rc = 0;
|
||||
break;
|
||||
case -EOPNOTSUPP:
|
||||
case -EINVAL:
|
||||
/*
|
||||
* FIXME: legacy server -- fall back to path-based call?
|
||||
* for now, just skip revalidating and mark inode for
|
||||
@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp)
|
||||
*/
|
||||
rc = 0;
|
||||
CIFS_I(inode)->time = 0;
|
||||
default:
|
||||
goto cgfi_exit;
|
||||
} else if (rc == -EREMOTE) {
|
||||
cifs_create_dfs_fattr(&fattr, inode->i_sb);
|
||||
rc = 0;
|
||||
} else if (rc)
|
||||
goto cgfi_exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* don't bother with SFU junk here -- just mark inode as needing
|
||||
* revalidation.
|
||||
*/
|
||||
cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
|
||||
fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
|
||||
fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
|
||||
cifs_fattr_to_inode(inode, &fattr);
|
||||
@ -2096,6 +2101,8 @@ static int
|
||||
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
|
||||
{
|
||||
int xid;
|
||||
uid_t uid = NO_CHANGE_32;
|
||||
gid_t gid = NO_CHANGE_32;
|
||||
struct inode *inode = direntry->d_inode;
|
||||
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
|
||||
struct cifsInodeInfo *cifsInode = CIFS_I(inode);
|
||||
@ -2146,13 +2153,25 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
|
||||
goto cifs_setattr_exit;
|
||||
}
|
||||
|
||||
/*
|
||||
* Without unix extensions we can't send ownership changes to the
|
||||
* server, so silently ignore them. This is consistent with how
|
||||
* local DOS/Windows filesystems behave (VFAT, NTFS, etc). With
|
||||
* CIFSACL support + proper Windows to Unix idmapping, we may be
|
||||
* able to support this in the future.
|
||||
*/
|
||||
if (attrs->ia_valid & ATTR_UID)
|
||||
uid = attrs->ia_uid;
|
||||
|
||||
if (attrs->ia_valid & ATTR_GID)
|
||||
gid = attrs->ia_gid;
|
||||
|
||||
#ifdef CONFIG_CIFS_ACL
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
|
||||
if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
|
||||
rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
|
||||
uid, gid);
|
||||
if (rc) {
|
||||
cFYI(1, "%s: Setting id failed with error: %d",
|
||||
__func__, rc);
|
||||
goto cifs_setattr_exit;
|
||||
}
|
||||
}
|
||||
} else
|
||||
#endif /* CONFIG_CIFS_ACL */
|
||||
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
|
||||
attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
|
||||
|
||||
@ -2161,15 +2180,12 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
|
||||
attrs->ia_valid &= ~ATTR_MODE;
|
||||
|
||||
if (attrs->ia_valid & ATTR_MODE) {
|
||||
cFYI(1, "Mode changed to 0%o", attrs->ia_mode);
|
||||
mode = attrs->ia_mode;
|
||||
}
|
||||
|
||||
if (attrs->ia_valid & ATTR_MODE) {
|
||||
rc = 0;
|
||||
#ifdef CONFIG_CIFS_ACL
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
|
||||
rc = mode_to_cifs_acl(inode, full_path, mode);
|
||||
rc = id_mode_to_cifs_acl(inode, full_path, mode,
|
||||
NO_CHANGE_32, NO_CHANGE_32);
|
||||
if (rc) {
|
||||
cFYI(1, "%s: Setting ACL failed with error: %d",
|
||||
__func__, rc);
|
||||
|
@ -183,14 +183,20 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
|
||||
static int
|
||||
CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
|
||||
const char *fromName, const char *toName,
|
||||
const struct nls_table *nls_codepage, int remap)
|
||||
struct cifs_sb_info *cifs_sb)
|
||||
{
|
||||
int rc;
|
||||
int oplock = 0;
|
||||
int remap;
|
||||
int create_options = CREATE_NOT_DIR;
|
||||
__u16 netfid = 0;
|
||||
u8 *buf;
|
||||
unsigned int bytes_written = 0;
|
||||
struct cifs_io_parms io_parms;
|
||||
struct nls_table *nls_codepage;
|
||||
|
||||
nls_codepage = cifs_sb->local_nls;
|
||||
remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
|
||||
|
||||
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
|
||||
if (!buf)
|
||||
@ -202,8 +208,11 @@ CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
|
||||
return rc;
|
||||
}
|
||||
|
||||
if (backup_cred(cifs_sb))
|
||||
create_options |= CREATE_OPEN_BACKUP_INTENT;
|
||||
|
||||
rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
|
||||
CREATE_NOT_DIR, &netfid, &oplock, NULL,
|
||||
create_options, &netfid, &oplock, NULL,
|
||||
nls_codepage, remap);
|
||||
if (rc != 0) {
|
||||
kfree(buf);
|
||||
@ -559,9 +568,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
|
||||
/* BB what if DFS and this volume is on different share? BB */
|
||||
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
|
||||
rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
|
||||
cifs_sb->local_nls,
|
||||
cifs_sb->mnt_cifs_flags &
|
||||
CIFS_MOUNT_MAP_SPECIAL_CHR);
|
||||
cifs_sb);
|
||||
else if (pTcon->unix_ext)
|
||||
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
|
||||
cifs_sb->local_nls);
|
||||
|
@@ -420,19 +420,22 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
}

int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
{
	__u32 len = be32_to_cpu(smb->smb_buf_length);
	__u32 rfclen = be32_to_cpu(smb->smb_buf_length);
	__u32 clc_len; /* calculated length */
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len);
	cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
	     total_read, rfclen);

	if (length < 2 + sizeof(struct smb_hdr)) {
		if ((length >= sizeof(struct smb_hdr) - 1)
	/* is this frame too small to even get to a BCC? */
	if (total_read < 2 + sizeof(struct smb_hdr)) {
		if ((total_read >= sizeof(struct smb_hdr) - 1)
		    && (smb->Status.CifsError != 0)) {
			/* it's an error return */
			smb->WordCount = 0;
			/* some error cases do not return wct and bcc */
			return 0;
		} else if ((length == sizeof(struct smb_hdr) + 1) &&
		} else if ((total_read == sizeof(struct smb_hdr) + 1) &&
				(smb->WordCount == 0)) {
			char *tmp = (char *)smb;
			/* Need to work around a bug in two servers here */
@@ -452,39 +455,35 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
		} else {
			cERROR(1, "Length less than smb header size");
		}
		return 1;
	}
	if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
		cERROR(1, "smb length greater than MaxBufSize, mid=%d",
			   smb->Mid);
		return 1;
		return -EIO;
	}

	/* otherwise, there is enough to get to the BCC */
	if (check_smb_hdr(smb, mid))
		return 1;
		return -EIO;
	clc_len = smbCalcSize(smb);

	if (4 + len != length) {
	if (4 + rfclen != total_read) {
		cERROR(1, "Length read does not match RFC1001 length %d",
			len);
		return 1;
			rfclen);
		return -EIO;
	}

	if (4 + len != clc_len) {
	if (4 + rfclen != clc_len) {
		/* check if bcc wrapped around for large read responses */
		if ((len > 64 * 1024) && (len > clc_len)) {
		if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
			/* check if lengths match mod 64K */
			if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
			if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
				return 0; /* bcc wrapped */
		}
		cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
				clc_len, 4 + len, smb->Mid);
				clc_len, 4 + rfclen, smb->Mid);

		if (4 + len < clc_len) {
		if (4 + rfclen < clc_len) {
			cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
					len, smb->Mid);
			return 1;
		} else if (len > clc_len + 512) {
					rfclen, smb->Mid);
			return -EIO;
		} else if (rfclen > clc_len + 512) {
			/*
			 * Some servers (Windows XP in particular) send more
			 * data than the lengths in the SMB packet would
@@ -495,8 +494,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
			 * data to 512 bytes.
			 */
			cERROR(1, "RFC1001 size %u more than 512 bytes larger "
				  "than SMB for mid=%u", len, smb->Mid);
			return 1;
				  "than SMB for mid=%u", rfclen, smb->Mid);
			return -EIO;
		}
	}
	return 0;
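To make the renamed length checks above easier to follow, here is a minimal userspace sketch (not the kernel function itself): total_read is the number of bytes actually read, rfclen is the RFC1001 length taken from the frame, and clc_len stands in for the value smbCalcSize() would compute. The constants and the main() driver are illustrative assumptions only.

/* Userspace sketch of the checkSMB() length consistency checks. */
#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_HDR_SIZE	84		/* placeholder header overhead, not the kernel constant */
#define MAX_BUF_SIZE	(16 * 1024)	/* placeholder for CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4 */

/* In the kernel, clc_len comes from smbCalcSize(); here the caller supplies it. */
static int check_lengths(uint32_t total_read, uint32_t rfclen, uint32_t clc_len)
{
	if (rfclen > MAX_BUF_SIZE)
		return -EIO;			/* frame larger than the negotiated buffer */

	if (4 + rfclen != total_read)
		return -EIO;			/* RFC1001 length disagrees with bytes read */

	if (4 + rfclen != clc_len) {
		/* large read responses may wrap the byte count at 64K */
		if (rfclen > 64 * 1024 && rfclen > clc_len &&
		    ((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
			return 0;		/* bcc wrapped: accept the frame */
		if (4 + rfclen < clc_len)
			return -EIO;		/* frame shorter than the SMB claims */
		if (rfclen > clc_len + 512)
			return -EIO;		/* padded by more than the 512-byte slack */
	}
	return 0;
}

int main(void)
{
	printf("%d\n", check_lengths(4 + 100, 100, 104));	/* consistent: prints 0 */
	printf("%d\n", check_lengths(4 + 100, 100, 200));	/* too short: prints -EIO */
	return 0;
}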
@@ -676,3 +675,18 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
		cinode->clientCanCacheRead = false;
	}
}

bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
		if (cifs_sb->mnt_backupuid == current_fsuid())
			return true;
	}
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
		if (in_group_p(cifs_sb->mnt_backupgid))
			return true;
	}

	return false;
}
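The new backup_cred() helper answers one question: does the caller match the backupuid or backupgid configured at mount time? A rough userspace model of that decision, with stand-in types and geteuid()/getegid() in place of current_fsuid()/in_group_p(), might look like this (illustrative sketch, not kernel code):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>

#define MOUNT_BACKUPUID 0x200000	/* mirrors CIFS_MOUNT_CIFS_BACKUPUID */
#define MOUNT_BACKUPGID 0x400000	/* mirrors CIFS_MOUNT_CIFS_BACKUPGID */

struct mount_info {			/* stand-in for cifs_sb_info */
	unsigned int flags;
	uid_t backupuid;
	gid_t backupgid;
};

static bool backup_cred_model(const struct mount_info *m)
{
	if ((m->flags & MOUNT_BACKUPUID) && m->backupuid == geteuid())
		return true;	/* caller is the designated backup user */
	if ((m->flags & MOUNT_BACKUPGID) && m->backupgid == getegid())
		return true;	/* caller is in the designated backup group
				 * (the kernel checks all groups via in_group_p()) */
	return false;
}

int main(void)
{
	struct mount_info m = { .flags = MOUNT_BACKUPUID, .backupuid = geteuid() };
	printf("backup intent: %s\n", backup_cred_model(&m) ? "yes" : "no");
	return 0;
}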
@@ -124,7 +124,9 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
	/* that we use in next few lines */
	/* Note that header is initialized to zero in header_assemble */
	pSMB->req.AndXCommand = 0xFF;
	pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
	pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
					CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
					USHRT_MAX));
	pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
	pSMB->req.VcNumber = get_next_vcnum(ses);
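The MaxBufferSize field in SESSION_SETUP_ANDX is only 16 bits wide, which is why the hunk above clamps the advertised size with min_t(..., USHRT_MAX) instead of echoing the server's maxBuf. A userspace sketch of the same clamp, with illustrative constants standing in for CIFSMaxBufSize and MAX_CIFS_HDR_SIZE:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define CIFS_MAX_BUF_SIZE	(127 * 1024)	/* example client buffer size, an assumption */
#define MAX_CIFS_HDR_SIZE_EX	88		/* example header overhead, an assumption */

static uint16_t announced_max_buffer_size(void)
{
	uint32_t want = CIFS_MAX_BUF_SIZE + MAX_CIFS_HDR_SIZE_EX - 4;

	/* clamp to what the 16-bit wire field can carry */
	return (uint16_t)(want > USHRT_MAX ? USHRT_MAX : want);
}

int main(void)
{
	printf("MaxBufferSize on the wire: %u\n", (unsigned)announced_max_buffer_size());
	return 0;
}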
@@ -265,91 +265,6 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
	return rc;
}

#if 0 /* currently unused */
/* Does both the NT and LM owfs of a user's password */
static void
nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
{
	char passwd[514];

	memset(passwd, '\0', 514);
	if (strlen(pwd) < 513)
		strcpy(passwd, pwd);
	else
		memcpy(passwd, pwd, 512);
	/* Calculate the MD4 hash (NT compatible) of the password */
	memset(nt_p16, '\0', 16);
	E_md4hash(passwd, nt_p16);

	/* Mangle the passwords into Lanman format */
	passwd[14] = '\0';
	/* strupper(passwd); */

	/* Calculate the SMB (lanman) hash functions of the password */

	memset(p16, '\0', 16);
	E_P16((unsigned char *) passwd, (unsigned char *) p16);

	/* clear out local copy of user's password (just being paranoid). */
	memset(passwd, '\0', sizeof(passwd));
}
#endif

/* Does the NTLMv2 owfs of a user's password */
#if 0 /* function not needed yet - but will be soon */
static void
ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
		const char *domain_n, unsigned char kr_buf[16],
		const struct nls_table *nls_codepage)
{
	wchar_t *user_u;
	wchar_t *dom_u;
	int user_l, domain_l;
	struct HMACMD5Context ctx;

	/* might as well do one alloc to hold both (user_u and dom_u) */
	user_u = kmalloc(2048 * sizeof(wchar_t), GFP_KERNEL);
	if (user_u == NULL)
		return;
	dom_u = user_u + 1024;

	/* push_ucs2(NULL, user_u, user_n, (user_l+1)*2,
		STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
	push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2,
		STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */

	/* BB user and domain may need to be uppercased */
	user_l = cifs_strtoUCS(user_u, user_n, 511, nls_codepage);
	domain_l = cifs_strtoUCS(dom_u, domain_n, 511, nls_codepage);

	user_l++; /* trailing null */
	domain_l++;

	hmac_md5_init_limK_to_64(owf, 16, &ctx);
	hmac_md5_update((const unsigned char *) user_u, user_l * 2, &ctx);
	hmac_md5_update((const unsigned char *) dom_u, domain_l * 2, &ctx);
	hmac_md5_final(kr_buf, &ctx);

	kfree(user_u);
}
#endif

/* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
#if 0 /* currently unused */
static void
NTLMSSPOWFencrypt(unsigned char passwd[8],
		unsigned char *ntlmchalresp, unsigned char p24[24])
{
	unsigned char p21[21];

	memset(p21, '\0', 21);
	memcpy(p21, passwd, 8);
	memset(p21 + 8, 0xbd, 8);

	E_P24(p21, ntlmchalresp, p24);
}
#endif

/* Does the NT MD4 hash then des encryption. */
int
SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
@@ -369,39 +284,3 @@ SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
	rc = E_P24(p21, c8, p24);
	return rc;
}


/* Does the md5 encryption from the NT hash for NTLMv2. */
/* These routines will be needed later */
#if 0
static void
SMBOWFencrypt_ntv2(const unsigned char kr[16],
		const struct data_blob *srv_chal,
		const struct data_blob *cli_chal, unsigned char resp_buf[16])
{
	struct HMACMD5Context ctx;

	hmac_md5_init_limK_to_64(kr, 16, &ctx);
	hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
	hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
	hmac_md5_final(resp_buf, &ctx);
}

static void
SMBsesskeygen_ntv2(const unsigned char kr[16],
		const unsigned char *nt_resp, __u8 sess_key[16])
{
	struct HMACMD5Context ctx;

	hmac_md5_init_limK_to_64(kr, 16, &ctx);
	hmac_md5_update(nt_resp, 16, &ctx);
	hmac_md5_final((unsigned char *) sess_key, &ctx);
}

static void
SMBsesskeygen_ntv1(const unsigned char kr[16],
		const unsigned char *nt_resp, __u8 sess_key[16])
{
	mdfour((unsigned char *) sess_key, (unsigned char *) kr, 16);
}
#endif
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/net.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <linux/mempool.h>
@@ -324,7 +325,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{
	int error;

	error = wait_event_killable(server->response_q,
	error = wait_event_freezekillable(server->response_q,
				    midQ->midState != MID_REQUEST_SUBMITTED);
	if (error < 0)
		return -ERESTARTSYS;
@@ -339,8 +340,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
 */
int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		unsigned int nvec, mid_callback_t *callback, void *cbdata,
		bool ignore_pend)
		unsigned int nvec, mid_receive_t *receive,
		mid_callback_t *callback, void *cbdata, bool ignore_pend)
{
	int rc;
	struct mid_q_entry *mid;
@@ -374,6 +375,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
		goto out_err;
	}

	mid->receive = receive;
	mid->callback = callback;
	mid->callback_data = cbdata;
	mid->midState = MID_REQUEST_SUBMITTED;
@@ -496,13 +498,18 @@ int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
		   bool log_error)
{
	dump_smb(mid->resp_buf,
		 min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
	unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;

	dump_smb(mid->resp_buf, min_t(u32, 92, len));

	/* convert the length into a more usable form */
	if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
		struct kvec iov;

		iov.iov_base = mid->resp_buf;
		iov.iov_len = len;
		/* FIXME: add code to kill session */
		if (cifs_verify_signature(mid->resp_buf, server,
		if (cifs_verify_signature(&iov, 1, server,
					  mid->sequence_number + 1) != 0)
			cERROR(1, "Unexpected SMB signature");
	}
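cifs_call_async() now takes a mid_receive_t hook alongside the completion callback and stores it in mid->receive, so the demultiplex thread can hand off reading the remainder of a large frame. The following userspace-only sketch models that wiring with stand-in types (pending_req instead of mid_q_entry, and so on); it illustrates the callback pattern, it is not the kernel API.

#include <stdio.h>
#include <stdlib.h>

struct pending_req;
typedef int  (*receive_fn)(struct pending_req *req, const char *buf, int len);
typedef void (*callback_fn)(struct pending_req *req);

struct pending_req {
	int         result;
	receive_fn  receive;	/* optional: drain the rest of the frame */
	callback_fn callback;	/* always set: complete the request */
	void       *cbdata;
};

static struct pending_req *submit_async(receive_fn receive, callback_fn callback,
					void *cbdata)
{
	struct pending_req *req = calloc(1, sizeof(*req));
	if (!req)
		return NULL;
	req->receive = receive;
	req->callback = callback;
	req->cbdata = cbdata;
	return req;
}

static int read_body(struct pending_req *req, const char *buf, int len)
{
	(void)buf;
	printf("receive hook got %d byte(s)\n", len);
	req->result = 0;
	return 0;
}

static void done(struct pending_req *req)
{
	printf("callback: result=%d data=%s\n", req->result, (char *)req->cbdata);
}

int main(void)
{
	struct pending_req *req = submit_async(read_body, done, "hello");
	if (!req)
		return 1;
	/* a demultiplex thread would normally drive these two steps */
	if (req->receive)
		req->receive(req, "xx", 2);
	req->callback(req);
	free(req);
	return 0;
}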
@@ -173,7 +173,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
#ifdef CONFIG_CIFS_ACL
			memcpy(pacl, ea_value, value_size);
			rc = set_cifs_acl(pacl, value_size,
				direntry->d_inode, full_path);
				direntry->d_inode, full_path, CIFS_ACL_DACL);
			if (rc == 0) /* force revalidate of the inode */
				CIFS_I(direntry->d_inode)->time = 0;
			kfree(pacl);
@@ -135,10 +135,25 @@ static inline void set_freezable_with_signal(void)
}

/*
 * Freezer-friendly wrappers around wait_event_interruptible() and
 * wait_event_interruptible_timeout(), originally defined in <linux/wait.h>
 * Freezer-friendly wrappers around wait_event_interruptible(),
 * wait_event_killable() and wait_event_interruptible_timeout(), originally
 * defined in <linux/wait.h>
 */

#define wait_event_freezekillable(wq, condition)			\
({									\
	int __retval;							\
	do {								\
		__retval = wait_event_killable(wq,			\
				(condition) || freezing(current));	\
		if (__retval && !freezing(current))			\
			break;						\
		else if (!(condition))					\
			__retval = -ERESTARTSYS;			\
	} while (try_to_freeze());					\
	__retval;							\
})

#define wait_event_freezable(wq, condition)				\
({									\
	int __retval;							\
@@ -190,6 +205,9 @@ static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable_timeout(wq, condition, timeout)		\
	wait_event_interruptible_timeout(wq, condition, timeout)

#define wait_event_freezekillable(wq, condition)			\
		wait_event_killable(wq, condition)

#endif /* !CONFIG_FREEZER */

#endif /* FREEZER_H_INCLUDED */
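The control flow of wait_event_freezekillable() is easy to misread, so here is a userspace model of it. The wait/freezing()/try_to_freeze() helpers below are stubs invented for the sketch; the point is that a wakeup caused only by the freezer loops again after thawing rather than returning, and -ERESTARTSYS is reported only when the condition never became true.

#include <stdio.h>
#include <stdbool.h>
#include <errno.h>

static int  wakeups;		/* drives the fake wait below */
static bool condition_met;

static int fake_wait_killable(void)
{
	/* stand-in for wait_event_killable(): pretend the task slept,
	 * was woken, and was not killed */
	wakeups++;
	if (wakeups >= 2)
		condition_met = true;	/* the "response" arrives on wakeup 2 */
	return 0;
}

static bool freezing(void)      { return wakeups == 1; }	/* freeze exactly once */
static bool try_to_freeze(void) { return freezing(); }

static int wait_freezekillable_model(void)
{
	int retval;

	do {
		retval = fake_wait_killable();
		if (retval && !freezing())
			break;			/* killed, and not because of the freezer */
		else if (!condition_met)
			retval = -ERESTARTSYS;	/* woken only by the freezer */
	} while (try_to_freeze());		/* loop again after thawing */

	return retval;
}

int main(void)
{
	printf("wait returned %d, condition=%d\n",
	       wait_freezekillable_model(), condition_met);
	return 0;
}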