Commit dabcbb1b authored by Linus Torvalds

Merge branch '3.2-without-smb2' of git://git.samba.org/sfrench/cifs-2.6

* '3.2-without-smb2' of git://git.samba.org/sfrench/cifs-2.6: (52 commits)
  Fix build break when freezer not configured
  Add definition for share encryption
  CIFS: Make cifs_push_locks send as many locks at once as possible
  CIFS: Send as many mandatory unlock ranges at once as possible
  CIFS: Implement caching mechanism for posix brlocks
  CIFS: Implement caching mechanism for mandatory brlocks
  CIFS: Fix DFS handling in cifs_get_file_info
  CIFS: Fix error handling in cifs_readv_complete
  [CIFS] Fixup trivial checkpatch warning
  [CIFS] Show nostrictsync and noperm mount options in /proc/mounts
  cifs, freezer: add wait_event_freezekillable and have cifs use it
  cifs: allow cifs_max_pending to be readable under /sys/module/cifs/parameters
  cifs: tune bdi.ra_pages in accordance with the rsize
  cifs: allow for larger rsize= options and change defaults
  cifs: convert cifs_readpages to use async reads
  cifs: add cifs_async_readv
  cifs: fix protocol definition for READ_RSP
  cifs: add a callback function to receive the rest of the frame
  cifs: break out 3rd receive phase into separate function
  cifs: find mid earlier in receive codepath
  ...
parents 5619a693 e0c8ea1a
@@ -745,4 +745,18 @@ installed and something like the following lines should be added to the
create cifs.spnego * * /usr/local/sbin/cifs.upcall %k
create dns_resolver * * /usr/local/sbin/cifs.upcall %k
CIFS kernel module parameters
=============================
These module parameters can be specified or modified either at module load time
or at runtime via the interface
/sys/module/cifs/parameters/<param>
i.e. echo "value" > /sys/module/cifs/parameters/<param>
1. echo_retries - The number of echo attempts before giving up and
reconnecting to the server. The default is 5. The value 0
means never reconnect.
2. enable_oplocks - Enable or disable oplocks. Oplocks are enabled by default
		([Y/y/1]). To disable, use any of [N/n/0].
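For example (paths and parameter names as documented above), the oplock setting
could be inspected and changed at runtime with:
	cat /sys/module/cifs/parameters/enable_oplocks
	echo 0 > /sys/module/cifs/parameters/enable_oplocks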
@@ -511,7 +511,7 @@ static const struct file_operations cifsFYI_proc_fops = {
static int cifs_oplock_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "%d\n", oplockEnabled);
seq_printf(m, "%d\n", enable_oplocks);
return 0;
}
@@ -526,13 +526,16 @@ static ssize_t cifs_oplock_proc_write(struct file *file,
char c;
int rc;
printk(KERN_WARNING "CIFS: The /proc/fs/cifs/OplockEnabled interface "
"will be removed in kernel version 3.4. Please migrate to "
"using the 'enable_oplocks' module parameter in cifs.ko.\n");
rc = get_user(c, buffer);
if (rc)
return rc;
if (c == '0' || c == 'n' || c == 'N')
oplockEnabled = 0;
enable_oplocks = false;
else if (c == '1' || c == 'y' || c == 'Y')
oplockEnabled = 1;
enable_oplocks = true;
return count;
}
......
@@ -43,6 +43,8 @@
#define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */
#define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */
#define CIFS_MOUNT_POSIXACL 0x100000 /* mirror of MS_POSIXACL in mnt_cifs_flags */
#define CIFS_MOUNT_CIFS_BACKUPUID 0x200000 /* backup intent bit for a user */
#define CIFS_MOUNT_CIFS_BACKUPGID 0x400000 /* backup intent bit for a group */
struct cifs_sb_info {
struct rb_root tlink_tree;
@@ -55,6 +57,8 @@ struct cifs_sb_info {
atomic_t active;
uid_t mnt_uid;
gid_t mnt_gid;
uid_t mnt_backupuid;
gid_t mnt_backupgid;
mode_t mnt_file_mode;
mode_t mnt_dir_mode;
unsigned int mnt_cifs_flags;
......
@@ -91,9 +91,76 @@ cifs_idmap_shrinker(struct shrinker *shrink, struct shrink_control *sc)
shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
spin_unlock(&sidgidlock);
root = &siduidtree;
spin_lock(&uidsidlock);
shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
spin_unlock(&uidsidlock);
root = &sidgidtree;
spin_lock(&gidsidlock);
shrink_idmap_tree(root, nr_to_scan, &nr_rem, &nr_del);
spin_unlock(&gidsidlock);
return nr_rem;
}
static void
sid_rb_insert(struct rb_root *root, unsigned long cid,
struct cifs_sid_id **psidid, char *typestr)
{
char *strptr;
struct rb_node *node = root->rb_node;
struct rb_node *parent = NULL;
struct rb_node **linkto = &(root->rb_node);
struct cifs_sid_id *lsidid;
while (node) {
lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
parent = node;
if (cid > lsidid->id) {
linkto = &(node->rb_left);
node = node->rb_left;
}
if (cid < lsidid->id) {
linkto = &(node->rb_right);
node = node->rb_right;
}
}
(*psidid)->id = cid;
(*psidid)->time = jiffies - (SID_MAP_RETRY + 1);
(*psidid)->refcount = 0;
sprintf((*psidid)->sidstr, "%s", typestr);
strptr = (*psidid)->sidstr + strlen((*psidid)->sidstr);
sprintf(strptr, "%ld", cid);
clear_bit(SID_ID_PENDING, &(*psidid)->state);
clear_bit(SID_ID_MAPPED, &(*psidid)->state);
rb_link_node(&(*psidid)->rbnode, parent, linkto);
rb_insert_color(&(*psidid)->rbnode, root);
}
static struct cifs_sid_id *
sid_rb_search(struct rb_root *root, unsigned long cid)
{
struct rb_node *node = root->rb_node;
struct cifs_sid_id *lsidid;
while (node) {
lsidid = rb_entry(node, struct cifs_sid_id, rbnode);
if (cid > lsidid->id)
node = node->rb_left;
else if (cid < lsidid->id)
node = node->rb_right;
else /* node found */
return lsidid;
}
return NULL;
}
static struct shrinker cifs_shrinker = {
.shrink = cifs_idmap_shrinker,
.seeks = DEFAULT_SEEKS,
@@ -110,6 +177,7 @@ cifs_idmap_key_instantiate(struct key *key, const void *data, size_t datalen)
memcpy(payload, data, datalen);
key->payload.data = payload;
key->datalen = datalen;
return 0;
}
@@ -223,6 +291,120 @@ sidid_pending_wait(void *unused)
return signal_pending(current) ? -ERESTARTSYS : 0;
}
static int
id_to_sid(unsigned long cid, uint sidtype, struct cifs_sid *ssid)
{
int rc = 0;
struct key *sidkey;
const struct cred *saved_cred;
struct cifs_sid *lsid;
struct cifs_sid_id *psidid, *npsidid;
struct rb_root *cidtree;
spinlock_t *cidlock;
if (sidtype == SIDOWNER) {
cidlock = &siduidlock;
cidtree = &uidtree;
} else if (sidtype == SIDGROUP) {
cidlock = &sidgidlock;
cidtree = &gidtree;
} else
return -EINVAL;
spin_lock(cidlock);
psidid = sid_rb_search(cidtree, cid);
if (!psidid) { /* node does not exist, allocate one & attempt adding */
spin_unlock(cidlock);
npsidid = kzalloc(sizeof(struct cifs_sid_id), GFP_KERNEL);
if (!npsidid)
return -ENOMEM;
npsidid->sidstr = kmalloc(SIDLEN, GFP_KERNEL);
if (!npsidid->sidstr) {
kfree(npsidid);
return -ENOMEM;
}
spin_lock(cidlock);
psidid = sid_rb_search(cidtree, cid);
if (psidid) { /* node happened to get inserted meanwhile */
++psidid->refcount;
spin_unlock(cidlock);
kfree(npsidid->sidstr);
kfree(npsidid);
} else {
psidid = npsidid;
sid_rb_insert(cidtree, cid, &psidid,
sidtype == SIDOWNER ? "oi:" : "gi:");
++psidid->refcount;
spin_unlock(cidlock);
}
} else {
++psidid->refcount;
spin_unlock(cidlock);
}
/*
* If we are here, it is safe to access psidid and its fields
* since a reference was taken earlier while holding the spinlock.
* A reference on the node is put without holding the spinlock
* and it is OK to do so in this case, shrinker will not erase
* this node until all references are put and we do not access
* any fields of the node after a reference is put .
*/
if (test_bit(SID_ID_MAPPED, &psidid->state)) {
memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
psidid->time = jiffies; /* update ts for accessing */
goto id_sid_out;
}
if (time_after(psidid->time + SID_MAP_RETRY, jiffies)) {
rc = -EINVAL;
goto id_sid_out;
}
if (!test_and_set_bit(SID_ID_PENDING, &psidid->state)) {
saved_cred = override_creds(root_cred);
sidkey = request_key(&cifs_idmap_key_type, psidid->sidstr, "");
if (IS_ERR(sidkey)) {
rc = -EINVAL;
cFYI(1, "%s: Can't map and id to a SID", __func__);
} else {
lsid = (struct cifs_sid *)sidkey->payload.data;
memcpy(&psidid->sid, lsid,
sidkey->datalen < sizeof(struct cifs_sid) ?
sidkey->datalen : sizeof(struct cifs_sid));
memcpy(ssid, &psidid->sid,
sidkey->datalen < sizeof(struct cifs_sid) ?
sidkey->datalen : sizeof(struct cifs_sid));
set_bit(SID_ID_MAPPED, &psidid->state);
key_put(sidkey);
kfree(psidid->sidstr);
}
psidid->time = jiffies; /* update ts for accessing */
revert_creds(saved_cred);
clear_bit(SID_ID_PENDING, &psidid->state);
wake_up_bit(&psidid->state, SID_ID_PENDING);
} else {
rc = wait_on_bit(&psidid->state, SID_ID_PENDING,
sidid_pending_wait, TASK_INTERRUPTIBLE);
if (rc) {
cFYI(1, "%s: sidid_pending_wait interrupted %d",
__func__, rc);
--psidid->refcount;
return rc;
}
if (test_bit(SID_ID_MAPPED, &psidid->state))
memcpy(ssid, &psidid->sid, sizeof(struct cifs_sid));
else
rc = -EINVAL;
}
id_sid_out:
--psidid->refcount;
return rc;
}
static int
sid_to_id(struct cifs_sb_info *cifs_sb, struct cifs_sid *psid,
struct cifs_fattr *fattr, uint sidtype)
@@ -383,6 +565,10 @@ init_cifs_idmap(void)
spin_lock_init(&sidgidlock);
gidtree = RB_ROOT;
spin_lock_init(&uidsidlock);
siduidtree = RB_ROOT;
spin_lock_init(&gidsidlock);
sidgidtree = RB_ROOT;
register_shrinker(&cifs_shrinker);
cFYI(1, "cifs idmap keyring: %d\n", key_serial(keyring));
@@ -422,6 +608,18 @@ cifs_destroy_idmaptrees(void)
while ((node = rb_first(root)))
rb_erase(node, root);
spin_unlock(&sidgidlock);
root = &siduidtree;
spin_lock(&uidsidlock);
while ((node = rb_first(root)))
rb_erase(node, root);
spin_unlock(&uidsidlock);
root = &sidgidtree;
spin_lock(&gidsidlock);
while ((node = rb_first(root)))
rb_erase(node, root);
spin_unlock(&gidsidlock);
}
/* if the two SIDs (roughly equivalent to a UUID for a user or group) are
@@ -868,52 +1066,82 @@ static int parse_sec_desc(struct cifs_sb_info *cifs_sb,
else
cFYI(1, "no ACL"); /* BB grant all or default perms? */
/* cifscred->uid = owner_sid_ptr->rid;
cifscred->gid = group_sid_ptr->rid;
memcpy((void *)(&(cifscred->osid)), (void *)owner_sid_ptr,
sizeof(struct cifs_sid));
memcpy((void *)(&(cifscred->gsid)), (void *)group_sid_ptr,
sizeof(struct cifs_sid)); */
return rc;
}
/* Convert permission bits from mode to equivalent CIFS ACL */
static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
struct inode *inode, __u64 nmode)
__u32 secdesclen, __u64 nmode, uid_t uid, gid_t gid, int *aclflag)
{
int rc = 0;
__u32 dacloffset;
__u32 ndacloffset;
__u32 sidsoffset;
struct cifs_sid *owner_sid_ptr, *group_sid_ptr;
struct cifs_sid *nowner_sid_ptr, *ngroup_sid_ptr;
struct cifs_acl *dacl_ptr = NULL; /* no need for SACL ptr */
struct cifs_acl *ndacl_ptr = NULL; /* no need for SACL ptr */
if ((inode == NULL) || (pntsd == NULL) || (pnntsd == NULL))
if (nmode != NO_CHANGE_64) { /* chmod */
return -EIO;
owner_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->osidoffset));
group_sid_ptr = (struct cifs_sid *)((char *)pntsd +
le32_to_cpu(pntsd->gsidoffset));
dacloffset = le32_to_cpu(pntsd->dacloffset);
dacl_ptr = (struct cifs_acl *)((char *)pntsd + dacloffset);
ndacloffset = sizeof(struct cifs_ntsd);
ndacl_ptr = (struct cifs_acl *)((char *)pnntsd + ndacloffset);
ndacl_ptr->revision = dacl_ptr->revision;
ndacl_ptr->size = 0;
ndacl_ptr->num_aces = 0;
rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr, nmode);
rc = set_chmod_dacl(ndacl_ptr, owner_sid_ptr, group_sid_ptr,
nmode);
sidsoffset = ndacloffset + le16_to_cpu(ndacl_ptr->size);
/* copy sec desc control portion & owner and group sids */
/* copy security descriptor control portion and owner and group sid */
copy_sec_desc(pntsd, pnntsd, sidsoffset);
*aclflag = CIFS_ACL_DACL;
} else {
memcpy(pnntsd, pntsd, secdesclen);
if (uid != NO_CHANGE_32) { /* chown */
owner_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
le32_to_cpu(pnntsd->osidoffset));
nowner_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!nowner_sid_ptr)
return -ENOMEM;
rc = id_to_sid(uid, SIDOWNER, nowner_sid_ptr);
if (rc) {
cFYI(1, "%s: Mapping error %d for owner id %d",
__func__, rc, uid);
kfree(nowner_sid_ptr);
return rc;
}
memcpy(owner_sid_ptr, nowner_sid_ptr,
sizeof(struct cifs_sid));
kfree(nowner_sid_ptr);
*aclflag = CIFS_ACL_OWNER;
}
if (gid != NO_CHANGE_32) { /* chgrp */
group_sid_ptr = (struct cifs_sid *)((char *)pnntsd +
le32_to_cpu(pnntsd->gsidoffset));
ngroup_sid_ptr = kmalloc(sizeof(struct cifs_sid),
GFP_KERNEL);
if (!ngroup_sid_ptr)
return -ENOMEM;
rc = id_to_sid(gid, SIDGROUP, ngroup_sid_ptr);
if (rc) {
cFYI(1, "%s: Mapping error %d for group id %d",
__func__, rc, gid);
kfree(ngroup_sid_ptr);
return rc;
}
memcpy(group_sid_ptr, ngroup_sid_ptr,
sizeof(struct cifs_sid));
kfree(ngroup_sid_ptr);
*aclflag = CIFS_ACL_GROUP;
}
}
return rc;
}
@@ -945,7 +1173,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
{
struct cifs_ntsd *pntsd = NULL;
int oplock = 0;
int xid, rc;
int xid, rc, create_options = 0;
__u16 fid;
struct cifs_tcon *tcon;
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
@@ -956,8 +1184,11 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb,
tcon = tlink_tcon(tlink);
xid = GetXid();
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL, 0,
&fid, &oplock, NULL, cifs_sb->local_nls,
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, READ_CONTROL,
create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (!rc) {
rc = CIFSSMBGetCIFSACL(xid, tcon, fid, &pntsd, pacllen);
@@ -991,13 +1222,15 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
return pntsd;
}
static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
struct cifs_ntsd *pnntsd, u32 acllen)
/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct inode *inode, const char *path, int aclflag)
{
int oplock = 0;
int xid, rc;
int xid, rc, access_flags, create_options = 0;
__u16 fid;
struct cifs_tcon *tcon;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
@@ -1006,15 +1239,23 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
tcon = tlink_tcon(tlink);
xid = GetXid();
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, WRITE_DAC, 0,
&fid, &oplock, NULL, cifs_sb->local_nls,
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
if (aclflag == CIFS_ACL_OWNER || aclflag == CIFS_ACL_GROUP)
access_flags = WRITE_OWNER;
else
access_flags = WRITE_DAC;
rc = CIFSSMBOpen(xid, tcon, path, FILE_OPEN, access_flags,
create_options, &fid, &oplock, NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) {
cERROR(1, "Unable to open file to set ACL");
goto out;
}
rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen);
rc = CIFSSMBSetCIFSACL(xid, tcon, fid, pnntsd, acllen, aclflag);
cFYI(DBG2, "SetCIFSACL rc = %d", rc);
CIFSSMBClose(xid, tcon, fid);
@@ -1024,17 +1265,6 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path,
return rc;
}
/* Set an ACL on the server */
int set_cifs_acl(struct cifs_ntsd *pnntsd, __u32 acllen,
struct inode *inode, const char *path)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
cFYI(DBG2, "set ACL for %s from mode 0x%x", path, inode->i_mode);
return set_cifs_acl_by_path(cifs_sb, path, pnntsd, acllen);
}
/* Translate the CIFS ACL (simlar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
@@ -1066,9 +1296,12 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
}
/* Convert mode bits to an ACL so we can update the ACL on the server */
int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
int
id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
uid_t uid, gid_t gid)
{
int rc = 0;
int aclflag = CIFS_ACL_DACL; /* default flag to set */
__u32 secdesclen = 0;
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
@@ -1098,13 +1331,15 @@ int mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode)
return -ENOMEM;
}
rc = build_sec_desc(pntsd, pnntsd, inode, nmode);
rc = build_sec_desc(pntsd, pnntsd, secdesclen, nmode, uid, gid,
&aclflag);
cFYI(DBG2, "build_sec_desc rc: %d", rc);
if (!rc) {
/* Set the security descriptor */
rc = set_cifs_acl(pnntsd, secdesclen, inode, path);
rc = set_cifs_acl(pnntsd, secdesclen, inode,
path, aclflag);
cFYI(DBG2, "set_cifs_acl rc: %d", rc);
}
......
@@ -37,82 +37,7 @@
* the sequence number before this function is called. Also, this function
* should be called with the server->srv_mutex held.
*/
static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
static int cifs_calc_signature(const struct kvec *iov, int n_vec,
struct TCP_Server_Info *server, char *signature)
{
int rc;
if (cifs_pdu == NULL || signature == NULL || server == NULL)
return -EINVAL;
if (!server->secmech.sdescmd5) {
cERROR(1, "%s: Can't generate signature\n", __func__);
return -1;
}
rc = crypto_shash_init(&server->secmech.sdescmd5->shash);
if (rc) {
cERROR(1, "%s: Could not init md5\n", __func__);
return rc;
}
rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
server->session_key.response, server->session_key.len);
if (rc) {
cERROR(1, "%s: Could not update with response\n", __func__);
return rc;
}
rc = crypto_shash_update(&server->secmech.sdescmd5->shash,
cifs_pdu->Protocol, be32_to_cpu(cifs_pdu->smb_buf_length));
if (rc) {
cERROR(1, "%s: Could not update with payload\n", __func__);
return rc;
}
rc = crypto_shash_final(&server->secmech.sdescmd5->shash, signature);
if (rc)
cERROR(1, "%s: Could not generate md5 hash\n", __func__);
return rc;
}
/* must be called with server->srv_mutex held */
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
int rc = 0;
char smb_signature[20];
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
if (!(cifs_pdu->Flags2 & SMBFLG2_SECURITY_SIGNATURE) ||
server->tcpStatus == CifsNeedNegotiate)
return rc;
if (!server->session_estab) {
strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
return rc;
}
cifs_pdu->Signature.Sequence.SequenceNumber =
cpu_to_le32(server->sequence_number);
cifs_pdu->Signature.Sequence.Reserved = 0;
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
rc = cifs_calculate_signature(cifs_pdu, server, smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
memcpy(cifs_pdu->Signature.SecuritySignature, smb_signature, 8);
return rc;
}
static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
struct TCP_Server_Info *server, char *signature)
{
int i;
@@ -179,7 +104,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
{
int rc = 0;
char smb_signature[20];
struct smb_hdr *cifs_pdu = iov[0].iov_base;
struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
if ((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
@@ -189,7 +114,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
return rc;
if (!server->session_estab) {
strncpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
memcpy(cifs_pdu->Signature.SecuritySignature, "BSRSPYL", 8);
return rc;
}
@@ -200,7 +125,7 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
*pexpected_response_sequence_number = server->sequence_number++;
server->sequence_number++;
rc = cifs_calc_signature2(iov, n_vec, server, smb_signature);
rc = cifs_calc_signature(iov, n_vec, server, smb_signature);
if (rc)
memset(cifs_pdu->Signature.SecuritySignature, 0, 8);
else
@@ -209,13 +134,27 @@ int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *server,
return rc;
}
int cifs_verify_signature(struct smb_hdr *cifs_pdu,
/* must be called with server->srv_mutex held */
int cifs_sign_smb(struct smb_hdr *cifs_pdu, struct TCP_Server_Info *server,
__u32 *pexpected_response_sequence_number)
{
struct kvec iov;
iov.iov_base = cifs_pdu;
iov.iov_len = be32_to_cpu(cifs_pdu->smb_buf_length) + 4;
return cifs_sign_smb2(&iov, 1, server,
pexpected_response_sequence_number);
}
int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
struct TCP_Server_Info *server,
__u32 expected_sequence_number)
{
unsigned int rc;
char server_response_sig[8];
char what_we_think_sig_should_be[20];
struct smb_hdr *cifs_pdu = (struct smb_hdr *)iov[0].iov_base;
if (cifs_pdu == NULL || server == NULL)
return -EINVAL;
@@ -247,7 +186,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu,
cifs_pdu->Signature.Sequence.Reserved = 0;
mutex_lock(&server->srv_mutex);
rc = cifs_calculate_signature(cifs_pdu, server,
rc = cifs_calc_signature(iov, nr_iov, server,
what_we_think_sig_should_be);
mutex_unlock(&server->srv_mutex);
......
@@ -53,7 +53,7 @@
int cifsFYI = 0;
int cifsERROR = 1;
int traceSMB = 0;
unsigned int oplockEnabled = 1;
bool enable_oplocks = true;
unsigned int linuxExtEnabled = 1;
unsigned int lookupCacheEnabled = 1;
unsigned int multiuser_mount = 0;
@@ -74,7 +74,7 @@ module_param(cifs_min_small, int, 0);
MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
"Range: 2 to 256");
unsigned int cifs_max_pending = CIFS_MAX_REQ;
module_param(cifs_max_pending, int, 0);
module_param(cifs_max_pending, int, 0444);
MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
"Default: 50 Range: 2 to 256");
unsigned short echo_retries = 5;
@@ -82,6 +82,10 @@ module_param(echo_retries, ushort, 0644);
MODULE_PARM_DESC(echo_retries, "Number of echo attempts before giving up and "
"reconnecting server. Default: 5. 0 means "
"never reconnect.");
module_param(enable_oplocks, bool, 0644);
MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks (bool). Default:"
"y/Y/1");
extern mempool_t *cifs_sm_req_poolp;
extern mempool_t *cifs_req_poolp;
extern mempool_t *cifs_mid_poolp;
@@ -132,12 +136,12 @@ cifs_read_super(struct super_block *sb)
else
sb->s_d_op = &cifs_dentry_ops;
#ifdef CIFS_NFSD_EXPORT
#ifdef CONFIG_CIFS_NFSD_EXPORT
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
cFYI(1, "export ops supported");
sb->s_export_op = &cifs_export_ops;
}
#endif /* CIFS_NFSD_EXPORT */
#endif /* CONFIG_CIFS_NFSD_EXPORT */
return 0;
@@ -432,6 +436,12 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
seq_printf(s, ",mfsymlinks");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
seq_printf(s, ",fsc");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
seq_printf(s, ",nostrictsync");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
seq_printf(s, ",noperm");
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
seq_printf(s, ",strictcache");
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
@@ -530,7 +540,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
char *full_path = NULL;
char *s, *p;
char sep;
int xid;
full_path = cifs_build_path_to_root(vol, cifs_sb,
cifs_sb_master_tcon(cifs_sb));
@@ -539,7 +548,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
cFYI(1, "Get root dentry for %s", full_path);
xid = GetXid();
sep = CIFS_DIR_SEP(cifs_sb);
dentry = dget(sb->s_root);
p = s = full_path;
@@ -570,7 +578,6 @@ cifs_get_root(struct smb_vol *vol, struct super_block *sb)
dput(dentry);
dentry = child;
} while (!IS_ERR(dentry));
_FreeXid(xid);
kfree(full_path);
return dentry;
}
@@ -942,7 +949,8 @@ cifs_init_once(void *inode)
struct cifsInodeInfo *cifsi = inode;
inode_init_once(&cifsi->vfs_inode);
INIT_LIST_HEAD(&cifsi->lockList);
INIT_LIST_HEAD(&cifsi->llist);
mutex_init(&cifsi->lock_mutex);
}
static int
......
@@ -121,9 +121,9 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
#ifdef CIFS_NFSD_EXPORT
#ifdef CONFIG_CIFS_NFSD_EXPORT
extern const struct export_operations cifs_export_ops;
#endif /* CIFS_NFSD_EXPORT */
#endif /* CONFIG_CIFS_NFSD_EXPORT */
#define CIFS_VERSION "1.75"
#endif /* _CIFSFS_H */
...@@ -167,6 +167,8 @@ struct smb_vol { ...@@ -167,6 +167,8 @@ struct smb_vol {
uid_t cred_uid; uid_t cred_uid;
uid_t linux_uid; uid_t linux_uid;
gid_t linux_gid; gid_t linux_gid;
uid_t backupuid;
gid_t backupgid;
mode_t file_mode; mode_t file_mode;
mode_t dir_mode; mode_t dir_mode;
unsigned secFlg; unsigned secFlg;
...@@ -179,6 +181,8 @@ struct smb_vol { ...@@ -179,6 +181,8 @@ struct smb_vol {
bool noperm:1; bool noperm:1;
bool no_psx_acl:1; /* set if posix acl support should be disabled */ bool no_psx_acl:1; /* set if posix acl support should be disabled */
bool cifs_acl:1; bool cifs_acl:1;
bool backupuid_specified; /* mount option backupuid is specified */
bool backupgid_specified; /* mount option backupgid is specified */
bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
bool server_ino:1; /* use inode numbers from server ie UniqueId */ bool server_ino:1; /* use inode numbers from server ie UniqueId */
bool direct_io:1; bool direct_io:1;
...@@ -219,7 +223,8 @@ struct smb_vol { ...@@ -219,7 +223,8 @@ struct smb_vol {
CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \ CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \
CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \ CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \
CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \ CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \
CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO) CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO | \
CIFS_MOUNT_CIFS_BACKUPUID | CIFS_MOUNT_CIFS_BACKUPGID)
#define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \ #define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \
MS_NODEV | MS_SYNCHRONOUS) MS_NODEV | MS_SYNCHRONOUS)
...@@ -286,7 +291,13 @@ struct TCP_Server_Info { ...@@ -286,7 +291,13 @@ struct TCP_Server_Info {
bool sec_kerberosu2u; /* supports U2U Kerberos */ bool sec_kerberosu2u; /* supports U2U Kerberos */
bool sec_kerberos; /* supports plain Kerberos */ bool sec_kerberos; /* supports plain Kerberos */
bool sec_mskerberos; /* supports legacy MS Kerberos */ bool sec_mskerberos; /* supports legacy MS Kerberos */
bool large_buf; /* is current buffer large? */
struct delayed_work echo; /* echo ping workqueue job */ struct delayed_work echo; /* echo ping workqueue job */
struct kvec *iov; /* reusable kvec array for receives */
unsigned int nr_iov; /* number of kvecs in array */
char *smallbuf; /* pointer to current "small" buffer */
char *bigbuf; /* pointer to current "big" buffer */
unsigned int total_read; /* total amount of data read in this pass */
#ifdef CONFIG_CIFS_FSCACHE #ifdef CONFIG_CIFS_FSCACHE
struct fscache_cookie *fscache; /* client index cache cookie */ struct fscache_cookie *fscache; /* client index cache cookie */
#endif #endif
...@@ -485,9 +496,13 @@ extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb); ...@@ -485,9 +496,13 @@ extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb);
*/ */
struct cifsLockInfo { struct cifsLockInfo {
struct list_head llist; /* pointer to next cifsLockInfo */ struct list_head llist; /* pointer to next cifsLockInfo */
struct list_head blist; /* pointer to locks blocked on this */
wait_queue_head_t block_q;
__u64 offset; __u64 offset;
__u64 length; __u64 length;
__u32 pid;
__u8 type; __u8 type;
__u16 netfid;
}; };
/* /*
...@@ -520,8 +535,6 @@ struct cifsFileInfo { ...@@ -520,8 +535,6 @@ struct cifsFileInfo {
struct dentry *dentry; struct dentry *dentry;
unsigned int f_flags; unsigned int f_flags;
struct tcon_link *tlink; struct tcon_link *tlink;
struct mutex lock_mutex;
struct list_head llist; /* list of byte range locks we have. */
bool invalidHandle:1; /* file closed via session abend */ bool invalidHandle:1; /* file closed via session abend */
bool oplock_break_cancelled:1; bool oplock_break_cancelled:1;
int count; /* refcount protected by cifs_file_list_lock */ int count; /* refcount protected by cifs_file_list_lock */
...@@ -554,7 +567,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file); ...@@ -554,7 +567,9 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
*/ */
struct cifsInodeInfo { struct cifsInodeInfo {
struct list_head lockList; struct list_head llist; /* brlocks for this inode */
bool can_cache_brlcks;
struct mutex lock_mutex; /* protect two fields above */
/* BB add in lists for dirty pages i.e. write caching info for oplock */ /* BB add in lists for dirty pages i.e. write caching info for oplock */
struct list_head openFileList; struct list_head openFileList;
__u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
...@@ -643,8 +658,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon, ...@@ -643,8 +658,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
struct mid_q_entry; struct mid_q_entry;
/*
* This is the prototype for the mid callback function. When creating one,
* take special care to avoid deadlocks. Things to bear in mind:
* This is the prototype for the mid receive function. This function is for
* receiving the rest of the SMB frame, starting with the WordCount (which is
* just after the MID in struct smb_hdr). Note:
*
* - This will be called by cifsd, with no locks held.
* - The mid will still be on the pending_mid_q.
* - mid->resp_buf will point to the current buffer.
*
* Returns zero on a successful receive, or an error. The receive state in
* the TCP_Server_Info will also be updated.
*/
typedef int (mid_receive_t)(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
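As a rough illustration of this receive contract (a minimal sketch only, not code
added by this series), a receive function that simply drains and discards the
remainder of the frame could look like the following; it leans on
cifs_read_from_socket(), dequeue_mid() and the TCP_Server_Info bookkeeping fields
declared elsewhere in this patch set:

/* Illustrative mid_receive_t: drain and drop the rest of the frame. */
static int
demo_drain_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
	/* the SMB header was read into the small buffer before this runs */
	struct smb_hdr *hdr = (struct smb_hdr *)server->smallbuf;
	int remaining = be32_to_cpu(hdr->smb_buf_length) + 4 -
			server->total_read;

	while (remaining > 0) {
		/* borrow the big buffer to swallow the remaining bytes */
		int length = cifs_read_from_socket(server, server->bigbuf,
				min_t(unsigned int, remaining,
				      CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
		if (length < 0)
			return length;
		server->total_read += length;
		remaining -= length;
	}

	dequeue_mid(mid, false);	/* take the mid off pending_mid_q */
	return 0;
}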
/*
* This is the prototype for the mid callback function. This is called once the
* mid has been received off of the socket. When creating one, take special
* care to avoid deadlocks. Things to bear in mind:
*
* - it will be called by cifsd, with no locks held
* - the mid will be removed from any lists
@@ -662,9 +693,10 @@ struct mid_q_entry {
unsigned long when_sent; /* time when smb send finished */
unsigned long when_received; /* when demux complete (taken off wire) */
#endif
mid_receive_t *receive; /* call receive callback */
mid_callback_t *callback; /* call completion callback */
void *callback_data; /* general purpose pointer for callback */
struct smb_hdr *resp_buf; /* response buffer */
struct smb_hdr *resp_buf; /* pointer to received SMB header */
int midState; /* wish this were enum but can not pass to wait_event */
__u8 command; /* smb command code */
bool largeBuf:1; /* if valid response, is pointer to large buf */
...@@ -964,7 +996,8 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions ...@@ -964,7 +996,8 @@ GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions
to be established on existing mount if we to be established on existing mount if we
have the uid/password or Kerberos credential have the uid/password or Kerberos credential
or equivalent for current user */ or equivalent for current user */
GLOBAL_EXTERN unsigned int oplockEnabled; /* enable or disable oplocks */
GLOBAL_EXTERN bool enable_oplocks;
GLOBAL_EXTERN unsigned int lookupCacheEnabled; GLOBAL_EXTERN unsigned int lookupCacheEnabled;
GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
with more secure ntlmssp2 challenge/resp */ with more secure ntlmssp2 challenge/resp */
...@@ -978,10 +1011,16 @@ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/ ...@@ -978,10 +1011,16 @@ GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
/* reconnect after this many failed echo attempts */ /* reconnect after this many failed echo attempts */
GLOBAL_EXTERN unsigned short echo_retries; GLOBAL_EXTERN unsigned short echo_retries;
#ifdef CONFIG_CIFS_ACL
GLOBAL_EXTERN struct rb_root uidtree; GLOBAL_EXTERN struct rb_root uidtree;
GLOBAL_EXTERN struct rb_root gidtree; GLOBAL_EXTERN struct rb_root gidtree;
GLOBAL_EXTERN spinlock_t siduidlock; GLOBAL_EXTERN spinlock_t siduidlock;
GLOBAL_EXTERN spinlock_t sidgidlock; GLOBAL_EXTERN spinlock_t sidgidlock;
GLOBAL_EXTERN struct rb_root siduidtree;
GLOBAL_EXTERN struct rb_root sidgidtree;
GLOBAL_EXTERN spinlock_t uidsidlock;
GLOBAL_EXTERN spinlock_t gidsidlock;
#endif /* CONFIG_CIFS_ACL */
void cifs_oplock_break(struct work_struct *work); void cifs_oplock_break(struct work_struct *work);
......
...@@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp { ...@@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp {
__le16 DataLengthHigh; __le16 DataLengthHigh;
__u64 Reserved2; __u64 Reserved2;
__u16 ByteCount; __u16 ByteCount;
__u8 Pad; /* BB check for whether padded to DWORD /* read response data immediately follows */
boundary and optimum performance here */
char Data[1];
} __attribute__((packed)) READ_RSP; } __attribute__((packed)) READ_RSP;
typedef struct locking_andx_range { typedef struct locking_andx_range {
...@@ -1913,6 +1911,10 @@ typedef struct whoami_rsp_data { /* Query level 0x202 */ ...@@ -1913,6 +1911,10 @@ typedef struct whoami_rsp_data { /* Query level 0x202 */
/* SETFSInfo Levels */ /* SETFSInfo Levels */
#define SMB_SET_CIFS_UNIX_INFO 0x200 #define SMB_SET_CIFS_UNIX_INFO 0x200
/* level 0x203 is defined above in list of QFS info levels */
/* #define SMB_REQUEST_TRANSPORT_ENCRYPTION 0x203 */
/* Level 0x200 request structure follows */
typedef struct smb_com_transaction2_setfsi_req { typedef struct smb_com_transaction2_setfsi_req {
struct smb_hdr hdr; /* wct = 15 */ struct smb_hdr hdr; /* wct = 15 */
__le16 TotalParameterCount; __le16 TotalParameterCount;
...@@ -1940,13 +1942,39 @@ typedef struct smb_com_transaction2_setfsi_req { ...@@ -1940,13 +1942,39 @@ typedef struct smb_com_transaction2_setfsi_req {
__le64 ClientUnixCap; /* Data end */ __le64 ClientUnixCap; /* Data end */
} __attribute__((packed)) TRANSACTION2_SETFSI_REQ; } __attribute__((packed)) TRANSACTION2_SETFSI_REQ;
/* level 0x203 request structure follows */
typedef struct smb_com_transaction2_setfs_enc_req {
struct smb_hdr hdr; /* wct = 15 */
__le16 TotalParameterCount;
__le16 TotalDataCount;
__le16 MaxParameterCount;
__le16 MaxDataCount;
__u8 MaxSetupCount;
__u8 Reserved;
__le16 Flags;
__le32 Timeout;
__u16 Reserved2;
__le16 ParameterCount; /* 4 */
__le16 ParameterOffset;
__le16 DataCount; /* 12 */
__le16 DataOffset;
__u8 SetupCount; /* one */
__u8 Reserved3;
__le16 SubCommand; /* TRANS2_SET_FS_INFORMATION */
__le16 ByteCount;
__u8 Pad;
__u16 Reserved4; /* Parameters start. */
__le16 InformationLevel;/* Parameters end. */
/* NTLMSSP Blob, Data start. */
} __attribute__((packed)) TRANSACTION2_SETFSI_ENC_REQ;
/* response for setfsinfo levels 0x200 and 0x203 */
typedef struct smb_com_transaction2_setfsi_rsp { typedef struct smb_com_transaction2_setfsi_rsp {
struct smb_hdr hdr; /* wct = 10 */ struct smb_hdr hdr; /* wct = 10 */
struct trans2_resp t2; struct trans2_resp t2;
__u16 ByteCount; __u16 ByteCount;
} __attribute__((packed)) TRANSACTION2_SETFSI_RSP; } __attribute__((packed)) TRANSACTION2_SETFSI_RSP;
typedef struct smb_com_transaction2_get_dfs_refer_req { typedef struct smb_com_transaction2_get_dfs_refer_req {
struct smb_hdr hdr; /* wct = 15 */ struct smb_hdr hdr; /* wct = 15 */
__le16 TotalParameterCount; __le16 TotalParameterCount;
...@@ -2098,13 +2126,13 @@ typedef struct { ...@@ -2098,13 +2126,13 @@ typedef struct {
#define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and #define CIFS_UNIX_PROXY_CAP 0x00000400 /* Proxy cap: 0xACE ioctl and
QFS PROXY call */ QFS PROXY call */
#ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_POSIX
/* Can not set pathnames cap yet until we send new posix create SMB since /* presumably don't need the 0x20 POSIX_PATH_OPS_CAP since we never send
otherwise server can treat such handles opened with older ntcreatex LockingX instead of posix locking call on unix sess (and we do not expect
(by a new client which knows how to send posix path ops) LockingX to use different (ie Windows) semantics than posix locking on
as non-posix handles (can affect write behavior with byte range locks. the same session (if WINE needs to do this later, we can add this cap
We can add back in POSIX_PATH_OPS cap when Posix Create/Mkdir finished */ back in later */
/* #define CIFS_UNIX_CAP_MASK 0x000000fb */ /* #define CIFS_UNIX_CAP_MASK 0x000000fb */
#define CIFS_UNIX_CAP_MASK 0x000000db #define CIFS_UNIX_CAP_MASK 0x000003db
#else #else
#define CIFS_UNIX_CAP_MASK 0x00000013 #define CIFS_UNIX_CAP_MASK 0x00000013
#endif /* CONFIG_CIFS_POSIX */ #endif /* CONFIG_CIFS_POSIX */
......
...@@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, ...@@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server); struct TCP_Server_Info *server);
extern void DeleteMidQEntry(struct mid_q_entry *midEntry); extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, mid_callback_t *callback, unsigned int nvec, mid_receive_t *receive,
void *cbdata, bool ignore_pend); mid_callback_t *callback, void *cbdata,
bool ignore_pend);
extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *, extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
struct smb_hdr * /* input */ , struct smb_hdr * /* input */ ,
struct smb_hdr * /* out */ , struct smb_hdr * /* out */ ,
...@@ -90,6 +91,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid, ...@@ -90,6 +91,7 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length); extern int checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length);
extern bool is_valid_oplock_break(struct smb_hdr *smb, extern bool is_valid_oplock_break(struct smb_hdr *smb,
struct TCP_Server_Info *); struct TCP_Server_Info *);
extern bool backup_cred(struct cifs_sb_info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof); extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written); unsigned int bytes_written);
...@@ -145,12 +147,19 @@ extern int cifs_get_inode_info_unix(struct inode **pinode, ...@@ -145,12 +147,19 @@ extern int cifs_get_inode_info_unix(struct inode **pinode,
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode, struct cifs_fattr *fattr, struct inode *inode,
const char *path, const __u16 *pfid); const char *path, const __u16 *pfid);
extern int mode_to_cifs_acl(struct inode *inode, const char *path, __u64); extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
uid_t, gid_t);
extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
const char *, u32 *); const char *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *); const char *, int);
extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read);
extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
struct kvec *iov_orig, unsigned int nr_segs,
unsigned int to_read);
extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb); struct cifs_sb_info *cifs_sb);
extern int cifs_match_super(struct super_block *, void *); extern int cifs_match_super(struct super_block *, void *);
...@@ -359,14 +368,17 @@ extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon, ...@@ -359,14 +368,17 @@ extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon,
const struct nls_table *nls_codepage, const struct nls_table *nls_codepage,
int remap_special_chars); int remap_special_chars);
extern int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
const __u8 lock_type, const __u32 num_unlock,
const __u32 num_lock, LOCKING_ANDX_RANGE *buf);
extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon, extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 netfid, const __u64 len, const __u16 netfid, const __u32 netpid, const __u64 len,
const __u64 offset, const __u32 numUnlock, const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType, const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level); const bool waitFlag, const __u8 oplock_level);
extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag, const __u16 smb_file_id, const __u32 netpid,
const __u64 len, struct file_lock *, const int get_flag, const __u64 len, struct file_lock *,
const __u16 lock_type, const bool waitFlag); const __u16 lock_type, const bool waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon); extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon);
extern int CIFSSMBEcho(struct TCP_Server_Info *server); extern int CIFSSMBEcho(struct TCP_Server_Info *server);
...@@ -380,7 +392,7 @@ extern void tconInfoFree(struct cifs_tcon *); ...@@ -380,7 +392,7 @@ extern void tconInfoFree(struct cifs_tcon *);
extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *);
extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *,
__u32 *); __u32 *);
extern int cifs_verify_signature(struct smb_hdr *, extern int cifs_verify_signature(struct kvec *iov, unsigned int nr_iov,
struct TCP_Server_Info *server, struct TCP_Server_Info *server,
__u32 expected_sequence_number); __u32 expected_sequence_number);
extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *);
...@@ -419,7 +431,7 @@ extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, ...@@ -419,7 +431,7 @@ extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon,
extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon,
__u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen); __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen);
extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16, extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16,
struct cifs_ntsd *, __u32); struct cifs_ntsd *, __u32, int);
extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon, extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon,
const unsigned char *searchName, const unsigned char *searchName,
char *acl_inf, const int buflen, const int acl_type, char *acl_inf, const int buflen, const int acl_type,
...@@ -440,6 +452,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16); ...@@ -440,6 +452,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8, extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
unsigned char *p24); unsigned char *p24);
/* asynchronous read support */
struct cifs_readdata {
struct cifsFileInfo *cfile;
struct address_space *mapping;
__u64 offset;
unsigned int bytes;
pid_t pid;
int result;
struct list_head pages;
struct work_struct work;
unsigned int nr_iov;
struct kvec iov[1];
};
struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
void cifs_readdata_free(struct cifs_readdata *rdata);
int cifs_async_readv(struct cifs_readdata *rdata);
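For orientation, a caller of the async read interface above would be expected to
do roughly the following (an illustrative sketch only, not code from this patch;
the open file, mapping, offset/length, pid and page setup are assumed to come
from the caller, and error handling is trimmed):

/* Illustrative use of the declarations above (sketch, not patch code). */
struct cifs_readdata *rdata;
int rc;

rdata = cifs_readdata_alloc(nr_pages);	/* one kvec slot per page */
if (rdata == NULL)
	return -ENOMEM;

/* assumed: the caller already holds a reference on open_file;
 * cifs_readdata_free() drops it via cifsFileInfo_put() */
rdata->cfile = open_file;
rdata->mapping = mapping;	/* address_space owning the pages */
rdata->offset = offset;		/* file offset to read from */
rdata->bytes = bytes;		/* number of bytes requested */
rdata->pid = pid;
/* the pages to be filled are queued on rdata->pages before issuing */

rc = cifs_async_readv(rdata);	/* completion runs via rdata->work */
if (rc != 0)
	cifs_readdata_free(rdata);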
/* asynchronous write support */
struct cifs_writedata {
struct kref refcount;
......
...@@ -33,6 +33,8 @@ ...@@ -33,6 +33,8 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/posix_acl_xattr.h> #include <linux/posix_acl_xattr.h>
#include <linux/pagemap.h> #include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/task_io_accounting_ops.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include "cifspdu.h" #include "cifspdu.h"
#include "cifsglob.h" #include "cifsglob.h"
...@@ -40,6 +42,7 @@ ...@@ -40,6 +42,7 @@
#include "cifsproto.h" #include "cifsproto.h"
#include "cifs_unicode.h" #include "cifs_unicode.h"
#include "cifs_debug.h" #include "cifs_debug.h"
#include "fscache.h"
#ifdef CONFIG_CIFS_POSIX #ifdef CONFIG_CIFS_POSIX
static struct { static struct {
...@@ -83,6 +86,9 @@ static struct { ...@@ -83,6 +86,9 @@ static struct {
#endif /* CONFIG_CIFS_WEAK_PW_HASH */ #endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */ #endif /* CIFS_POSIX */
/* Forward declarations */
static void cifs_readv_complete(struct work_struct *work);
/* Mark as invalid, all open files on tree connections since they /* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */ were closed when session to server was lost */
static void mark_open_files_invalid(struct cifs_tcon *pTcon) static void mark_open_files_invalid(struct cifs_tcon *pTcon)
...@@ -453,8 +459,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) ...@@ -453,8 +459,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
} }
server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode);
server->maxReq = le16_to_cpu(rsp->MaxMpxCount); server->maxReq = le16_to_cpu(rsp->MaxMpxCount);
server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
(__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
/* even though we do not use raw we might as well set this /* even though we do not use raw we might as well set this
accurately, in case we ever find a need for it */ accurately, in case we ever find a need for it */
...@@ -561,8 +566,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) ...@@ -561,8 +566,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
little endian */ little endian */
server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount); server->maxReq = le16_to_cpu(pSMBr->MaxMpxCount);
/* probably no need to store and check maxvcs */ /* probably no need to store and check maxvcs */
server->maxBuf = min(le32_to_cpu(pSMBr->MaxBufferSize), server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
(__u32) CIFSMaxBufSize + MAX_CIFS_HDR_SIZE);
server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
cFYI(DBG2, "Max buf = %d", ses->server->maxBuf); cFYI(DBG2, "Max buf = %d", ses->server->maxBuf);
server->capabilities = le32_to_cpu(pSMBr->Capabilities); server->capabilities = le32_to_cpu(pSMBr->Capabilities);
...@@ -739,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server) ...@@ -739,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
iov.iov_base = smb; iov.iov_base = smb;
iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true); rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
server, true);
if (rc) if (rc)
cFYI(1, "Echo request failed: %d", rc); cFYI(1, "Echo request failed: %d", rc);
...@@ -1376,6 +1381,359 @@ CIFSSMBOpen(const int xid, struct cifs_tcon *tcon, ...@@ -1376,6 +1381,359 @@ CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
return rc; return rc;
} }
struct cifs_readdata *
cifs_readdata_alloc(unsigned int nr_pages)
{
struct cifs_readdata *rdata;
/* readdata + 1 kvec for each page */
rdata = kzalloc(sizeof(*rdata) +
sizeof(struct kvec) * nr_pages, GFP_KERNEL);
if (rdata != NULL) {
INIT_WORK(&rdata->work, cifs_readv_complete);
INIT_LIST_HEAD(&rdata->pages);
}
return rdata;
}
void
cifs_readdata_free(struct cifs_readdata *rdata)
{
cifsFileInfo_put(rdata->cfile);
kfree(rdata);
}
/*
* Discard any remaining data in the current SMB. To do this, we borrow the
* current bigbuf.
*/
static int
cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
int remaining = rfclen + 4 - server->total_read;
struct cifs_readdata *rdata = mid->callback_data;
while (remaining > 0) {
int length;
length = cifs_read_from_socket(server, server->bigbuf,
min_t(unsigned int, remaining,
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
if (length < 0)
return length;
server->total_read += length;
remaining -= length;
}
dequeue_mid(mid, rdata->result);
return 0;
}
static int
cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length, len;
unsigned int data_offset, remaining, data_len;
struct cifs_readdata *rdata = mid->callback_data;
READ_RSP *rsp = (READ_RSP *)server->smallbuf;
unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
u64 eof;
pgoff_t eof_index;
struct page *page, *tpage;
cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
mid->mid, rdata->offset, rdata->bytes);
/*
* read the rest of READ_RSP header (sans Data array), or whatever we
* can if there's not enough data. At this point, we've read down to
* the Mid.
*/
len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
sizeof(struct smb_hdr) + 1;
rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
if (length < 0)
return length;
server->total_read += length;
/* Was the SMB read successful? */
rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
if (rdata->result != 0) {
cFYI(1, "%s: server returned error %d", __func__,
rdata->result);
return cifs_readv_discard(server, mid);
}
/* Is there enough to get to the rest of the READ_RSP header? */
if (server->total_read < sizeof(READ_RSP)) {
cFYI(1, "%s: server returned short header. got=%u expected=%zu",
__func__, server->total_read, sizeof(READ_RSP));
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
data_offset = le16_to_cpu(rsp->DataOffset) + 4;
if (data_offset < server->total_read) {
/*
* win2k8 sometimes sends an offset of 0 when the read
* is beyond the EOF. Treat it as if the data starts just after
* the header.
*/
cFYI(1, "%s: data offset (%u) inside read response header",
__func__, data_offset);
data_offset = server->total_read;
} else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
/* data_offset is beyond the end of smallbuf */
cFYI(1, "%s: data offset (%u) beyond end of smallbuf",
__func__, data_offset);
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
cFYI(1, "%s: total_read=%u data_offset=%u", __func__,
server->total_read, data_offset);
len = data_offset - server->total_read;
if (len > 0) {
/* read any junk before data into the rest of smallbuf */
rdata->iov[0].iov_base = server->smallbuf + server->total_read;
rdata->iov[0].iov_len = len;
length = cifs_readv_from_socket(server, rdata->iov, 1, len);
if (length < 0)
return length;
server->total_read += length;
}
/* set up first iov for signature check */
rdata->iov[0].iov_base = server->smallbuf;
rdata->iov[0].iov_len = server->total_read;
cFYI(1, "0: iov_base=%p iov_len=%zu",
rdata->iov[0].iov_base, rdata->iov[0].iov_len);
/* how much data is in the response? */
data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
data_len += le16_to_cpu(rsp->DataLength);
if (data_offset + data_len > rfclen) {
/* data_len is corrupt -- discard frame */
rdata->result = -EIO;
return cifs_readv_discard(server, mid);
}
/* marshal up the page array */
len = 0;
remaining = data_len;
rdata->nr_iov = 1;
/* determine the eof that the server (probably) has */
eof = CIFS_I(rdata->mapping->host)->server_eof;
eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
if (remaining >= PAGE_CACHE_SIZE) {
/* enough data to fill the page */
rdata->iov[rdata->nr_iov].iov_base = kmap(page);
rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
rdata->nr_iov, page->index,
rdata->iov[rdata->nr_iov].iov_base,
rdata->iov[rdata->nr_iov].iov_len);
++rdata->nr_iov;
len += PAGE_CACHE_SIZE;
remaining -= PAGE_CACHE_SIZE;
} else if (remaining > 0) {
/* enough for partial page, fill and zero the rest */
rdata->iov[rdata->nr_iov].iov_base = kmap(page);
rdata->iov[rdata->nr_iov].iov_len = remaining;
cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
rdata->nr_iov, page->index,
rdata->iov[rdata->nr_iov].iov_base,
rdata->iov[rdata->nr_iov].iov_len);
memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
'\0', PAGE_CACHE_SIZE - remaining);
++rdata->nr_iov;
len += remaining;
remaining = 0;
} else if (page->index > eof_index) {
/*
* The VFS will not try to do readahead past the
* i_size, but it's possible that we have outstanding
* writes with gaps in the middle and the i_size hasn't
* caught up yet. Populate those with zeroed out pages
* to prevent the VFS from repeatedly attempting to
* fill them until the writes are flushed.
*/
zero_user(page, 0, PAGE_CACHE_SIZE);
list_del(&page->lru);
lru_cache_add_file(page);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
page_cache_release(page);
} else {
/* no need to hold page hostage */
list_del(&page->lru);
lru_cache_add_file(page);
unlock_page(page);
page_cache_release(page);
}
}
/* issue the read if we have any iovecs left to fill */
if (rdata->nr_iov > 1) {
length = cifs_readv_from_socket(server, &rdata->iov[1],
rdata->nr_iov - 1, len);
if (length < 0)
return length;
server->total_read += length;
} else {
length = 0;
}
rdata->bytes = length;
cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
rfclen, remaining);
/* discard anything left over */
if (server->total_read < rfclen)
return cifs_readv_discard(server, mid);
dequeue_mid(mid, false);
return length;
}
static void
cifs_readv_complete(struct work_struct *work)
{
struct cifs_readdata *rdata = container_of(work,
struct cifs_readdata, work);
struct page *page, *tpage;
list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
list_del(&page->lru);
lru_cache_add_file(page);
if (rdata->result == 0) {
kunmap(page);
flush_dcache_page(page);
SetPageUptodate(page);
}
unlock_page(page);
if (rdata->result == 0)
cifs_readpage_to_fscache(rdata->mapping->host, page);
page_cache_release(page);
}
cifs_readdata_free(rdata);
}
static void
cifs_readv_callback(struct mid_q_entry *mid)
{
struct cifs_readdata *rdata = mid->callback_data;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
mid->mid, mid->midState, rdata->result, rdata->bytes);
switch (mid->midState) {
case MID_RESPONSE_RECEIVED:
/* result already set, check signature */
if (server->sec_mode &
(SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
server, mid->sequence_number + 1))
cERROR(1, "Unexpected SMB signature");
}
/* FIXME: should this be counted toward the initiating task? */
task_io_account_read(rdata->bytes);
cifs_stats_bytes_read(tcon, rdata->bytes);
break;
case MID_REQUEST_SUBMITTED:
case MID_RETRY_NEEDED:
rdata->result = -EAGAIN;
break;
default:
rdata->result = -EIO;
}
queue_work(system_nrt_wq, &rdata->work);
DeleteMidQEntry(mid);
atomic_dec(&server->inFlight);
wake_up(&server->request_q);
}
/* cifs_async_readv - send an async read, and set up mid to handle result */
int
cifs_async_readv(struct cifs_readdata *rdata)
{
int rc;
READ_REQ *smb = NULL;
int wct;
struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
cFYI(1, "%s: offset=%llu bytes=%u", __func__,
rdata->offset, rdata->bytes);
if (tcon->ses->capabilities & CAP_LARGE_FILES)
wct = 12;
else {
wct = 10; /* old style read */
if ((rdata->offset >> 32) > 0) {
/* cannot handle this large an offset with the old style read */
return -EIO;
}
}
rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
if (rc)
return rc;
smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
smb->AndXCommand = 0xFF; /* none */
smb->Fid = rdata->cfile->netfid;
smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
if (wct == 12)
smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
smb->Remaining = 0;
smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
if (wct == 12)
smb->ByteCount = 0;
else {
/* old style read */
struct smb_com_readx_req *smbr =
(struct smb_com_readx_req *)smb;
smbr->ByteCount = 0;
}
/* 4 for RFC1001 length + 1 for BCC */
rdata->iov[0].iov_base = smb;
rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
cifs_readv_receive, cifs_readv_callback,
rdata, false);
if (rc == 0)
cifs_stats_inc(&tcon->num_reads);
cifs_small_buf_release(smb);
return rc;
}
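
Editorial note: a minimal caller sketch, not part of this patch, showing how cifs_readdata_alloc(), cifs_async_readv() and cifs_readdata_free() fit together. The helper name is hypothetical, the pid choice depends on mount flags, and the page locking/cleanup a real caller needs is elided.

static int example_issue_async_read(struct cifsFileInfo *cfile,
				    struct address_space *mapping,
				    struct list_head *locked_pages,
				    unsigned int nr_pages,
				    __u64 offset, unsigned int bytes)
{
	struct cifs_readdata *rdata;
	int rc;

	rdata = cifs_readdata_alloc(nr_pages);
	if (rdata == NULL)
		return -ENOMEM;

	/* cifs_readdata_free() drops this reference via cifsFileInfo_put() */
	cifsFileInfo_get(cfile);
	rdata->cfile = cfile;
	rdata->mapping = mapping;
	rdata->offset = offset;
	rdata->bytes = bytes;
	rdata->pid = cfile->pid;	/* or current->tgid, per mount flags */
	list_splice_init(locked_pages, &rdata->pages);

	rc = cifs_async_readv(rdata);
	if (rc)
		cifs_readdata_free(rdata);
	return rc;
}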
int int
CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
char **buf, int *pbuf_type) char **buf, int *pbuf_type)
...@@ -1836,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata) ...@@ -1836,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
kref_get(&wdata->refcount); kref_get(&wdata->refcount);
rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1, rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
cifs_writev_callback, wdata, false); NULL, cifs_writev_callback, wdata, false);
if (rc == 0) if (rc == 0)
cifs_stats_inc(&tcon->num_writes); cifs_stats_inc(&tcon->num_writes);
...@@ -1962,10 +2320,50 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms, ...@@ -1962,10 +2320,50 @@ CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms,
return rc; return rc;
} }
int cifs_lockv(const int xid, struct cifs_tcon *tcon, const __u16 netfid,
const __u8 lock_type, const __u32 num_unlock,
const __u32 num_lock, LOCKING_ANDX_RANGE *buf)
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
struct kvec iov[2];
int resp_buf_type;
__u16 count;
cFYI(1, "cifs_lockv num lock %d num unlock %d", num_lock, num_unlock);
rc = small_smb_init(SMB_COM_LOCKING_ANDX, 8, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMB->Timeout = 0;
pSMB->NumberOfLocks = cpu_to_le16(num_lock);
pSMB->NumberOfUnlocks = cpu_to_le16(num_unlock);
pSMB->LockType = lock_type;
pSMB->AndXCommand = 0xFF; /* none */
pSMB->Fid = netfid; /* netfid stays le */
count = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
iov[0].iov_base = (char *)pSMB;
iov[0].iov_len = be32_to_cpu(pSMB->hdr.smb_buf_length) + 4 -
(num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
iov[1].iov_base = (char *)buf;
iov[1].iov_len = (num_unlock + num_lock) * sizeof(LOCKING_ANDX_RANGE);
cifs_stats_inc(&tcon->num_locks);
rc = SendReceive2(xid, tcon->ses, iov, 2, &resp_buf_type, CIFS_NO_RESP);
if (rc)
cFYI(1, "Send error in cifs_lockv = %d", rc);
return rc;
}
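
Editorial note: a hedged sketch, not part of this patch, of pushing a single unlock range through cifs_lockv(). The wrapper name is hypothetical; the LOCKING_ANDX_RANGE field names and the LOCKING_ANDX_LARGE_FILES flag are those used elsewhere in cifspdu.h, and a real caller would batch many ranges into one buffer.

static int example_unlock_one_range(const int xid, struct cifs_tcon *tcon,
				    const __u16 netfid, const __u32 netpid,
				    const __u64 offset, const __u64 length)
{
	LOCKING_ANDX_RANGE range = {
		.Pid        = cpu_to_le16((__u16)netpid),
		.OffsetLow  = cpu_to_le32((u32)offset),
		.OffsetHigh = cpu_to_le32((u32)(offset >> 32)),
		.LengthLow  = cpu_to_le32((u32)length),
		.LengthHigh = cpu_to_le32((u32)(length >> 32)),
	};

	/* one unlock range, no lock ranges, 64-bit (large file) offsets */
	return cifs_lockv(xid, tcon, netfid, LOCKING_ANDX_LARGE_FILES,
			  1 /* num_unlock */, 0 /* num_lock */, &range);
}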
int int
CIFSSMBLock(const int xid, struct cifs_tcon *tcon, CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const __u64 len, const __u16 smb_file_id, const __u32 netpid, const __u64 len,
const __u64 offset, const __u32 numUnlock, const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType, const __u32 numLock, const __u8 lockType,
const bool waitFlag, const __u8 oplock_level) const bool waitFlag, const __u8 oplock_level)
...@@ -2001,7 +2399,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon, ...@@ -2001,7 +2399,7 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
pSMB->Fid = smb_file_id; /* netfid stays le */ pSMB->Fid = smb_file_id; /* netfid stays le */
if ((numLock != 0) || (numUnlock != 0)) { if ((numLock != 0) || (numUnlock != 0)) {
pSMB->Locks[0].Pid = cpu_to_le16(current->tgid); pSMB->Locks[0].Pid = cpu_to_le16(netpid);
/* BB where to store pid high? */ /* BB where to store pid high? */
pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len); pSMB->Locks[0].LengthLow = cpu_to_le32((u32)len);
pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32)); pSMB->Locks[0].LengthHigh = cpu_to_le32((u32)(len>>32));
...@@ -2035,9 +2433,9 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon, ...@@ -2035,9 +2433,9 @@ CIFSSMBLock(const int xid, struct cifs_tcon *tcon,
int int
CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len, const __u16 smb_file_id, const __u32 netpid, const int get_flag,
struct file_lock *pLockData, const __u16 lock_type, const __u64 len, struct file_lock *pLockData,
const bool waitFlag) const __u16 lock_type, const bool waitFlag)
{ {
struct smb_com_transaction2_sfi_req *pSMB = NULL; struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL; struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
...@@ -2095,7 +2493,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, ...@@ -2095,7 +2493,7 @@ CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon,
} else } else
pSMB->Timeout = 0; pSMB->Timeout = 0;
parm_data->pid = cpu_to_le32(current->tgid); parm_data->pid = cpu_to_le32(netpid);
parm_data->start = cpu_to_le64(pLockData->fl_start); parm_data->start = cpu_to_le64(pLockData->fl_start);
parm_data->length = cpu_to_le64(len); /* normalize negative numbers */ parm_data->length = cpu_to_le64(len); /* normalize negative numbers */
...@@ -2812,8 +3210,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon, ...@@ -2812,8 +3210,7 @@ CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon,
pSMB->TotalDataCount = 0; pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2); pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */ /* BB find exact data count max from sess structure BB */
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 4; pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0; pSMB->Reserved = 0;
pSMB->ParameterOffset = 0; pSMB->ParameterOffset = 0;
...@@ -3306,8 +3703,7 @@ smb_init_nttransact(const __u16 sub_command, const int setup_count, ...@@ -3306,8 +3703,7 @@ smb_init_nttransact(const __u16 sub_command, const int setup_count,
pSMB->Reserved = 0; pSMB->Reserved = 0;
pSMB->TotalParameterCount = cpu_to_le32(parm_len); pSMB->TotalParameterCount = cpu_to_le32(parm_len);
pSMB->TotalDataCount = 0; pSMB->TotalDataCount = 0;
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf - pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->ParameterCount = pSMB->TotalParameterCount; pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->DataCount = pSMB->TotalDataCount; pSMB->DataCount = pSMB->TotalDataCount;
temp_offset = offsetof(struct smb_com_ntransact_req, Parms) + temp_offset = offsetof(struct smb_com_ntransact_req, Parms) +
...@@ -3467,7 +3863,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, ...@@ -3467,7 +3863,7 @@ CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
int int
CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
struct cifs_ntsd *pntsd, __u32 acllen) struct cifs_ntsd *pntsd, __u32 acllen, int aclflag)
{ {
__u16 byte_count, param_count, data_count, param_offset, data_offset; __u16 byte_count, param_count, data_count, param_offset, data_offset;
int rc = 0; int rc = 0;
...@@ -3504,7 +3900,7 @@ CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, ...@@ -3504,7 +3900,7 @@ CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid,
pSMB->Fid = fid; /* file handle always le */ pSMB->Fid = fid; /* file handle always le */
pSMB->Reserved2 = 0; pSMB->Reserved2 = 0;
pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL); pSMB->AclFlags = cpu_to_le32(aclflag);
if (pntsd && acllen) { if (pntsd && acllen) {
memcpy((char *) &pSMBr->hdr.Protocol + data_offset, memcpy((char *) &pSMBr->hdr.Protocol + data_offset,
...@@ -3977,8 +4373,7 @@ CIFSFindFirst(const int xid, struct cifs_tcon *tcon, ...@@ -3977,8 +4373,7 @@ CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
params = 12 + name_len /* includes null */ ; params = 12 + name_len /* includes null */ ;
pSMB->TotalDataCount = 0; /* no EAs */ pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(10); pSMB->MaxParameterCount = cpu_to_le16(10);
pSMB->MaxDataCount = cpu_to_le16((tcon->ses->server->maxBuf - pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 0; pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0; pSMB->Reserved = 0;
pSMB->Flags = 0; pSMB->Flags = 0;
...@@ -4052,8 +4447,7 @@ CIFSFindFirst(const int xid, struct cifs_tcon *tcon, ...@@ -4052,8 +4447,7 @@ CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
psrch_inf->index_of_last_entry = 2 /* skip . and .. */ + psrch_inf->index_of_last_entry = 2 /* skip . and .. */ +
psrch_inf->entries_in_buffer; psrch_inf->entries_in_buffer;
lnoff = le16_to_cpu(parms->LastNameOffset); lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < if (CIFSMaxBufSize < lnoff) {
lnoff) {
cERROR(1, "ignoring corrupt resume name"); cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL; psrch_inf->last_entry = NULL;
return rc; return rc;
...@@ -4097,9 +4491,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon, ...@@ -4097,9 +4491,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
byte_count = 0; byte_count = 0;
pSMB->TotalDataCount = 0; /* no EAs */ pSMB->TotalDataCount = 0; /* no EAs */
pSMB->MaxParameterCount = cpu_to_le16(8); pSMB->MaxParameterCount = cpu_to_le16(8);
pSMB->MaxDataCount = pSMB->MaxDataCount = cpu_to_le16(CIFSMaxBufSize & 0xFFFFFF00);
cpu_to_le16((tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) &
0xFFFFFF00);
pSMB->MaxSetupCount = 0; pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0; pSMB->Reserved = 0;
pSMB->Flags = 0; pSMB->Flags = 0;
...@@ -4181,8 +4573,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon, ...@@ -4181,8 +4573,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
psrch_inf->index_of_last_entry += psrch_inf->index_of_last_entry +=
psrch_inf->entries_in_buffer; psrch_inf->entries_in_buffer;
lnoff = le16_to_cpu(parms->LastNameOffset); lnoff = le16_to_cpu(parms->LastNameOffset);
if (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE < if (CIFSMaxBufSize < lnoff) {
lnoff) {
cERROR(1, "ignoring corrupt resume name"); cERROR(1, "ignoring corrupt resume name");
psrch_inf->last_entry = NULL; psrch_inf->last_entry = NULL;
return rc; return rc;
...@@ -5840,7 +6231,7 @@ CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon, ...@@ -5840,7 +6231,7 @@ CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon,
if (ea_name) { if (ea_name) {
if (ea_name_len == name_len && if (ea_name_len == name_len &&
strncmp(ea_name, temp_ptr, name_len) == 0) { memcmp(ea_name, temp_ptr, name_len) == 0) {
temp_ptr += name_len + 1; temp_ptr += name_len + 1;
rc = value_len; rc = value_len;
if (buf_size == 0) if (buf_size == 0)
...@@ -6035,12 +6426,7 @@ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon, ...@@ -6035,12 +6426,7 @@ int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon,
pSMB->TotalParameterCount = 0 ; pSMB->TotalParameterCount = 0 ;
pSMB->TotalDataCount = 0; pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le32(2); pSMB->MaxParameterCount = cpu_to_le32(2);
/* BB find exact data count max from sess structure BB */ pSMB->MaxDataCount = cpu_to_le32(CIFSMaxBufSize & 0xFFFFFF00);
pSMB->MaxDataCount = 0; /* same in little endian or be */
/* BB VERIFY verify which is correct for above BB */
pSMB->MaxDataCount = cpu_to_le32((tcon->ses->server->maxBuf -
MAX_CIFS_HDR_SIZE) & 0xFFFFFF00);
pSMB->MaxSetupCount = 4; pSMB->MaxSetupCount = 4;
pSMB->Reserved = 0; pSMB->Reserved = 0;
pSMB->ParameterOffset = 0; pSMB->ParameterOffset = 0;
......
...@@ -181,7 +181,7 @@ cifs_reconnect(struct TCP_Server_Info *server) ...@@ -181,7 +181,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
-EINVAL = invalid transact2 -EINVAL = invalid transact2
*/ */
static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) static int check2ndT2(struct smb_hdr *pSMB)
{ {
struct smb_t2_rsp *pSMBt; struct smb_t2_rsp *pSMBt;
int remaining; int remaining;
...@@ -214,9 +214,9 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize) ...@@ -214,9 +214,9 @@ static int check2ndT2(struct smb_hdr *pSMB, unsigned int maxBufSize)
cFYI(1, "missing %d bytes from transact2, check next response", cFYI(1, "missing %d bytes from transact2, check next response",
remaining); remaining);
if (total_data_size > maxBufSize) { if (total_data_size > CIFSMaxBufSize) {
cERROR(1, "TotalDataSize %d is over maximum buffer %d", cERROR(1, "TotalDataSize %d is over maximum buffer %d",
total_data_size, maxBufSize); total_data_size, CIFSMaxBufSize);
return -EINVAL; return -EINVAL;
} }
return remaining; return remaining;
...@@ -320,27 +320,24 @@ cifs_echo_request(struct work_struct *work) ...@@ -320,27 +320,24 @@ cifs_echo_request(struct work_struct *work)
} }
static bool static bool
allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size, allocate_buffers(struct TCP_Server_Info *server)
bool is_large_buf)
{ {
char *bbuf = *bigbuf, *sbuf = *smallbuf; if (!server->bigbuf) {
server->bigbuf = (char *)cifs_buf_get();
if (bbuf == NULL) { if (!server->bigbuf) {
bbuf = (char *)cifs_buf_get();
if (!bbuf) {
cERROR(1, "No memory for large SMB response"); cERROR(1, "No memory for large SMB response");
msleep(3000); msleep(3000);
/* retry will check if exiting */ /* retry will check if exiting */
return false; return false;
} }
} else if (is_large_buf) { } else if (server->large_buf) {
/* we are reusing a dirty large buf, clear its start */ /* we are reusing a dirty large buf, clear its start */
memset(bbuf, 0, size); memset(server->bigbuf, 0, sizeof(struct smb_hdr));
} }
if (sbuf == NULL) { if (!server->smallbuf) {
sbuf = (char *)cifs_small_buf_get(); server->smallbuf = (char *)cifs_small_buf_get();
if (!sbuf) { if (!server->smallbuf) {
cERROR(1, "No memory for SMB response"); cERROR(1, "No memory for SMB response");
msleep(1000); msleep(1000);
/* retry will check if exiting */ /* retry will check if exiting */
...@@ -349,36 +346,116 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size, ...@@ -349,36 +346,116 @@ allocate_buffers(char **bigbuf, char **smallbuf, unsigned int size,
/* beginning of smb buffer is cleared in our buf_get */ /* beginning of smb buffer is cleared in our buf_get */
} else { } else {
/* if existing small buf clear beginning */ /* if existing small buf clear beginning */
memset(sbuf, 0, size); memset(server->smallbuf, 0, sizeof(struct smb_hdr));
} }
*bigbuf = bbuf; return true;
*smallbuf = sbuf; }
static bool
server_unresponsive(struct TCP_Server_Info *server)
{
if (echo_retries > 0 && server->tcpStatus == CifsGood &&
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
cERROR(1, "Server %s has not responded in %d seconds. "
"Reconnecting...", server->hostname,
(echo_retries * SMB_ECHO_INTERVAL / HZ));
cifs_reconnect(server);
wake_up(&server->response_q);
return true; return true;
}
return false;
} }
static int /*
read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg, * kvec_array_init - clone a kvec array, and advance into it
struct kvec *iov, unsigned int to_read, * @new: pointer to memory for cloned array
unsigned int *ptotal_read, bool is_header_read) * @iov: pointer to original array
* @nr_segs: number of members in original array
* @bytes: number of bytes to advance into the cloned array
*
* This function will copy the array provided in iov to a section of memory
* and advance the specified number of bytes into the new array. It returns
* the number of segments in the new array. "new" must be at least as big as
* the original iov array.
*/
static unsigned int
kvec_array_init(struct kvec *new, struct kvec *iov, unsigned int nr_segs,
size_t bytes)
{
size_t base = 0;
while (bytes || !iov->iov_len) {
int copy = min(bytes, iov->iov_len);
bytes -= copy;
base += copy;
if (iov->iov_len == base) {
iov++;
nr_segs--;
base = 0;
}
}
memcpy(new, iov, sizeof(*iov) * nr_segs);
new->iov_base += base;
new->iov_len -= base;
return nr_segs;
}
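
Editorial example of the helper above, with hypothetical buffers buf0/buf1: two segments of 4 and 8 bytes with 6 bytes already received leave one clone segment describing only the outstanding data.

	struct kvec orig[2] = { { buf0, 4 }, { buf1, 8 } };
	struct kvec clone[2];
	unsigned int segs;

	segs = kvec_array_init(clone, orig, 2, 6);
	/* segs == 1, clone[0].iov_base == buf1 + 2, clone[0].iov_len == 6 */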
static struct kvec *
get_server_iovec(struct TCP_Server_Info *server, unsigned int nr_segs)
{
struct kvec *new_iov;
if (server->iov && nr_segs <= server->nr_iov)
return server->iov;
/* not big enough -- allocate a new one and release the old */
new_iov = kmalloc(sizeof(*new_iov) * nr_segs, GFP_NOFS);
if (new_iov) {
kfree(server->iov);
server->iov = new_iov;
server->nr_iov = nr_segs;
}
return new_iov;
}
int
cifs_readv_from_socket(struct TCP_Server_Info *server, struct kvec *iov_orig,
unsigned int nr_segs, unsigned int to_read)
{ {
int length, rc = 0; int length = 0;
unsigned int total_read; int total_read;
char *buf = iov->iov_base; unsigned int segs;
struct msghdr smb_msg;
struct kvec *iov;
iov = get_server_iovec(server, nr_segs);
if (!iov)
return -ENOMEM;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
for (total_read = 0; to_read; total_read += length, to_read -= length) {
if (server_unresponsive(server)) {
total_read = -EAGAIN;
break;
}
segs = kvec_array_init(iov, iov_orig, nr_segs, total_read);
length = kernel_recvmsg(server->ssocket, &smb_msg,
iov, segs, to_read, 0);
for (total_read = 0; total_read < to_read; total_read += length) {
length = kernel_recvmsg(server->ssocket, smb_msg, iov, 1,
to_read - total_read, 0);
if (server->tcpStatus == CifsExiting) { if (server->tcpStatus == CifsExiting) {
/* then will exit */ total_read = -ESHUTDOWN;
rc = 2;
break; break;
} else if (server->tcpStatus == CifsNeedReconnect) { } else if (server->tcpStatus == CifsNeedReconnect) {
cifs_reconnect(server); cifs_reconnect(server);
/* Reconnect wakes up rspns q */ total_read = -EAGAIN;
/* Now we will reread sock */
rc = 1;
break; break;
} else if (length == -ERESTARTSYS || } else if (length == -ERESTARTSYS ||
length == -EAGAIN || length == -EAGAIN ||
...@@ -390,56 +467,54 @@ read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg, ...@@ -390,56 +467,54 @@ read_from_socket(struct TCP_Server_Info *server, struct msghdr *smb_msg,
*/ */
usleep_range(1000, 2000); usleep_range(1000, 2000);
length = 0; length = 0;
if (!is_header_read)
continue; continue;
/* Special handling for header read */
if (total_read) {
iov->iov_base = (to_read - total_read) +
buf;
iov->iov_len = to_read - total_read;
smb_msg->msg_control = NULL;
smb_msg->msg_controllen = 0;
rc = 3;
} else
rc = 1;
break;
} else if (length <= 0) { } else if (length <= 0) {
cERROR(1, "Received no data, expecting %d", cFYI(1, "Received no data or error: expecting %d "
to_read - total_read); "got %d", to_read, length);
cifs_reconnect(server); cifs_reconnect(server);
rc = 1; total_read = -EAGAIN;
break; break;
} }
} }
return total_read;
}
*ptotal_read = total_read; int
return rc; cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
unsigned int to_read)
{
struct kvec iov;
iov.iov_base = buf;
iov.iov_len = to_read;
return cifs_readv_from_socket(server, &iov, 1, to_read);
} }
static bool static bool
check_rfc1002_header(struct TCP_Server_Info *server, char *buf) is_smb_response(struct TCP_Server_Info *server, unsigned char type)
{ {
char temp = *buf;
unsigned int pdu_length = be32_to_cpu(
((struct smb_hdr *)buf)->smb_buf_length);
/* /*
* The first byte big endian of the length field, * The first byte big endian of the length field,
* is actually not part of the length but the type * is actually not part of the length but the type
* with the most common, zero, as regular data. * with the most common, zero, as regular data.
*/ */
if (temp == (char) RFC1002_SESSION_KEEP_ALIVE) { switch (type) {
return false; case RFC1002_SESSION_MESSAGE:
} else if (temp == (char)RFC1002_POSITIVE_SESSION_RESPONSE) { /* Regular SMB response */
cFYI(1, "Good RFC 1002 session rsp"); return true;
return false; case RFC1002_SESSION_KEEP_ALIVE:
} else if (temp == (char)RFC1002_NEGATIVE_SESSION_RESPONSE) { cFYI(1, "RFC 1002 session keep alive");
break;
case RFC1002_POSITIVE_SESSION_RESPONSE:
cFYI(1, "RFC 1002 positive session response");
break;
case RFC1002_NEGATIVE_SESSION_RESPONSE:
/* /*
* We get this from Windows 98 instead of an error on * We get this from Windows 98 instead of an error on
* SMB negprot response. * SMB negprot response.
*/ */
cFYI(1, "Negative RFC1002 Session Response Error 0x%x)", cFYI(1, "RFC 1002 negative session response");
pdu_length);
/* give server a second to clean up */ /* give server a second to clean up */
msleep(1000); msleep(1000);
/* /*
...@@ -448,87 +523,89 @@ check_rfc1002_header(struct TCP_Server_Info *server, char *buf) ...@@ -448,87 +523,89 @@ check_rfc1002_header(struct TCP_Server_Info *server, char *buf)
* is since we do not begin with RFC1001 session * is since we do not begin with RFC1001 session
* initialize frame). * initialize frame).
*/ */
cifs_set_port((struct sockaddr *) cifs_set_port((struct sockaddr *)&server->dstaddr, CIFS_PORT);
&server->dstaddr, CIFS_PORT);
cifs_reconnect(server); cifs_reconnect(server);
wake_up(&server->response_q); wake_up(&server->response_q);
return false; break;
} else if (temp != (char) 0) { default:
cERROR(1, "Unknown RFC 1002 frame"); cERROR(1, "RFC 1002 unknown response type 0x%x", type);
cifs_dump_mem(" Received Data: ", buf, 4);
cifs_reconnect(server); cifs_reconnect(server);
return false;
} }
/* else we have an SMB response */
if ((pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) ||
(pdu_length < sizeof(struct smb_hdr) - 1 - 4)) {
cERROR(1, "Invalid size SMB length %d pdu_length %d",
4, pdu_length+4);
cifs_reconnect(server);
wake_up(&server->response_q);
return false; return false;
}
return true;
} }
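
Editorial reference, not from this patch: per RFC 1002, the type byte switched on above takes these values on the wire.

/*
 *   0x00  RFC1002_SESSION_MESSAGE            (a normal SMB follows)
 *   0x82  RFC1002_POSITIVE_SESSION_RESPONSE
 *   0x83  RFC1002_NEGATIVE_SESSION_RESPONSE
 *   0x85  RFC1002_SESSION_KEEP_ALIVE
 */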
static struct mid_q_entry * static struct mid_q_entry *
find_cifs_mid(struct TCP_Server_Info *server, struct smb_hdr *buf, find_mid(struct TCP_Server_Info *server, struct smb_hdr *buf)
int *length, bool is_large_buf, bool *is_multi_rsp, char **bigbuf)
{ {
struct mid_q_entry *mid = NULL, *tmp_mid, *ret = NULL; struct mid_q_entry *mid;
spin_lock(&GlobalMid_Lock); spin_lock(&GlobalMid_Lock);
list_for_each_entry_safe(mid, tmp_mid, &server->pending_mid_q, qhead) { list_for_each_entry(mid, &server->pending_mid_q, qhead) {
if (mid->mid != buf->Mid || if (mid->mid == buf->Mid &&
mid->midState != MID_REQUEST_SUBMITTED || mid->midState == MID_REQUEST_SUBMITTED &&
mid->command != buf->Command) mid->command == buf->Command) {
continue; spin_unlock(&GlobalMid_Lock);
return mid;
}
}
spin_unlock(&GlobalMid_Lock);
return NULL;
}
void
dequeue_mid(struct mid_q_entry *mid, bool malformed)
{
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
spin_lock(&GlobalMid_Lock);
if (!malformed)
mid->midState = MID_RESPONSE_RECEIVED;
else
mid->midState = MID_RESPONSE_MALFORMED;
list_del_init(&mid->qhead);
spin_unlock(&GlobalMid_Lock);
}
if (*length == 0 && check2ndT2(buf, server->maxBuf) > 0) { static void
/* We have a multipart transact2 resp */ handle_mid(struct mid_q_entry *mid, struct TCP_Server_Info *server,
*is_multi_rsp = true; struct smb_hdr *buf, int malformed)
{
if (malformed == 0 && check2ndT2(buf) > 0) {
mid->multiRsp = true;
if (mid->resp_buf) { if (mid->resp_buf) {
/* merge response - fix up 1st*/ /* merge response - fix up 1st*/
*length = coalesce_t2(buf, mid->resp_buf); malformed = coalesce_t2(buf, mid->resp_buf);
if (*length > 0) { if (malformed > 0)
*length = 0; return;
mid->multiRsp = true;
break;
}
/* All parts received or packet is malformed. */ /* All parts received or packet is malformed. */
mid->multiEnd = true; mid->multiEnd = true;
goto multi_t2_fnd; return dequeue_mid(mid, malformed);
} }
if (!is_large_buf) { if (!server->large_buf) {
/*FIXME: switch to already allocated largebuf?*/ /*FIXME: switch to already allocated largebuf?*/
cERROR(1, "1st trans2 resp needs bigbuf"); cERROR(1, "1st trans2 resp needs bigbuf");
} else { } else {
/* Have first buffer */ /* Have first buffer */
mid->resp_buf = buf; mid->resp_buf = buf;
mid->largeBuf = true; mid->largeBuf = true;
*bigbuf = NULL; server->bigbuf = NULL;
} }
break; return;
} }
mid->resp_buf = buf; mid->resp_buf = buf;
mid->largeBuf = is_large_buf; mid->largeBuf = server->large_buf;
multi_t2_fnd: /* Was previous buf put in mpx struct for multi-rsp? */
if (*length == 0) if (!mid->multiRsp) {
mid->midState = MID_RESPONSE_RECEIVED; /* smb buffer will be freed by user thread */
if (server->large_buf)
server->bigbuf = NULL;
else else
mid->midState = MID_RESPONSE_MALFORMED; server->smallbuf = NULL;
#ifdef CONFIG_CIFS_STATS2
mid->when_received = jiffies;
#endif
list_del_init(&mid->qhead);
ret = mid;
break;
} }
spin_unlock(&GlobalMid_Lock); dequeue_mid(mid, malformed);
return ret;
} }
static void clean_demultiplex_info(struct TCP_Server_Info *server) static void clean_demultiplex_info(struct TCP_Server_Info *server)
...@@ -618,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) ...@@ -618,6 +695,7 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
} }
kfree(server->hostname); kfree(server->hostname);
kfree(server->iov);
kfree(server); kfree(server);
length = atomic_dec_return(&tcpSesAllocCount); length = atomic_dec_return(&tcpSesAllocCount);
...@@ -626,21 +704,71 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server) ...@@ -626,21 +704,71 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
GFP_KERNEL); GFP_KERNEL);
} }
static int
standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
{
int length;
char *buf = server->smallbuf;
struct smb_hdr *smb_buffer = (struct smb_hdr *)buf;
unsigned int pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
/* make sure this will fit in a large buffer */
if (pdu_length > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
cERROR(1, "SMB response too long (%u bytes)",
pdu_length);
cifs_reconnect(server);
wake_up(&server->response_q);
return -EAGAIN;
}
/* switch to large buffer if too big for a small one */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) {
server->large_buf = true;
memcpy(server->bigbuf, server->smallbuf, server->total_read);
buf = server->bigbuf;
smb_buffer = (struct smb_hdr *)buf;
}
/* now read the rest */
length = cifs_read_from_socket(server,
buf + sizeof(struct smb_hdr) - 1,
pdu_length - sizeof(struct smb_hdr) + 1 + 4);
if (length < 0)
return length;
server->total_read += length;
dump_smb(smb_buffer, server->total_read);
/*
* We know that we received enough to get to the MID as we
* checked the pdu_length earlier. Now check to see
* if the rest of the header is OK. We borrow the length
* var for the rest of the loop to avoid a new stack var.
*
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
length = checkSMB(smb_buffer, smb_buffer->Mid, server->total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, server->total_read, 48));
if (mid)
handle_mid(mid, server, smb_buffer, length);
return length;
}
static int static int
cifs_demultiplex_thread(void *p) cifs_demultiplex_thread(void *p)
{ {
int length; int length;
struct TCP_Server_Info *server = p; struct TCP_Server_Info *server = p;
unsigned int pdu_length, total_read; unsigned int pdu_length;
char *buf = NULL, *bigbuf = NULL, *smallbuf = NULL; char *buf = NULL;
struct smb_hdr *smb_buffer = NULL; struct smb_hdr *smb_buffer = NULL;
struct msghdr smb_msg;
struct kvec iov;
struct task_struct *task_to_wake = NULL; struct task_struct *task_to_wake = NULL;
struct mid_q_entry *mid_entry; struct mid_q_entry *mid_entry;
bool isLargeBuf = false;
bool isMultiRsp = false;
int rc;
current->flags |= PF_MEMALLOC; current->flags |= PF_MEMALLOC;
cFYI(1, "Demultiplex PID: %d", task_pid_nr(current)); cFYI(1, "Demultiplex PID: %d", task_pid_nr(current));
...@@ -655,111 +783,65 @@ cifs_demultiplex_thread(void *p) ...@@ -655,111 +783,65 @@ cifs_demultiplex_thread(void *p)
if (try_to_freeze()) if (try_to_freeze())
continue; continue;
if (!allocate_buffers(&bigbuf, &smallbuf, if (!allocate_buffers(server))
sizeof(struct smb_hdr), isLargeBuf))
continue; continue;
isLargeBuf = false; server->large_buf = false;
isMultiRsp = false; smb_buffer = (struct smb_hdr *)server->smallbuf;
smb_buffer = (struct smb_hdr *)smallbuf; buf = server->smallbuf;
buf = smallbuf;
iov.iov_base = buf;
iov.iov_len = 4;
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
pdu_length = 4; /* enough to get RFC1001 header */ pdu_length = 4; /* enough to get RFC1001 header */
incomplete_rcv: length = cifs_read_from_socket(server, buf, pdu_length);
if (echo_retries > 0 && server->tcpStatus == CifsGood && if (length < 0)
time_after(jiffies, server->lstrp +
(echo_retries * SMB_ECHO_INTERVAL))) {
cERROR(1, "Server %s has not responded in %d seconds. "
"Reconnecting...", server->hostname,
(echo_retries * SMB_ECHO_INTERVAL / HZ));
cifs_reconnect(server);
wake_up(&server->response_q);
continue;
}
rc = read_from_socket(server, &smb_msg, &iov, pdu_length,
&total_read, true /* header read */);
if (rc == 3)
goto incomplete_rcv;
else if (rc == 2)
break;
else if (rc == 1)
continue; continue;
server->total_read = length;
/* /*
* The right amount was read from socket - 4 bytes, * The right amount was read from socket - 4 bytes,
* so we can now interpret the length field. * so we can now interpret the length field.
*/ */
/*
* Note that RFC 1001 length is big endian on the wire,
* but we convert it here so it is always manipulated
* as host byte order.
*/
pdu_length = be32_to_cpu(smb_buffer->smb_buf_length); pdu_length = be32_to_cpu(smb_buffer->smb_buf_length);
cFYI(1, "rfc1002 length 0x%x", pdu_length+4); cFYI(1, "RFC1002 header 0x%x", pdu_length);
if (!check_rfc1002_header(server, buf)) if (!is_smb_response(server, buf[0]))
continue; continue;
/* else length ok */ /* make sure we have enough to get to the MID */
if (pdu_length > MAX_CIFS_SMALL_BUFFER_SIZE - 4) { if (pdu_length < sizeof(struct smb_hdr) - 1 - 4) {
isLargeBuf = true; cERROR(1, "SMB response too short (%u bytes)",
memcpy(bigbuf, smallbuf, 4); pdu_length);
smb_buffer = (struct smb_hdr *)bigbuf; cifs_reconnect(server);
buf = bigbuf; wake_up(&server->response_q);
continue;
} }
iov.iov_base = 4 + buf; /* read down to the MID */
iov.iov_len = pdu_length; length = cifs_read_from_socket(server, buf + 4,
rc = read_from_socket(server, &smb_msg, &iov, pdu_length, sizeof(struct smb_hdr) - 1 - 4);
&total_read, false); if (length < 0)
if (rc == 2)
break;
else if (rc == 1)
continue; continue;
server->total_read += length;
total_read += 4; /* account for rfc1002 hdr */ mid_entry = find_mid(server, smb_buffer);
dump_smb(smb_buffer, total_read); if (!mid_entry || !mid_entry->receive)
length = standard_receive3(server, mid_entry);
else
length = mid_entry->receive(server, mid_entry);
/* if (length < 0)
* We know that we received enough to get to the MID as we continue;
* checked the pdu_length earlier. Now check to see
* if the rest of the header is OK. We borrow the length
* var for the rest of the loop to avoid a new stack var.
*
* 48 bytes is enough to display the header and a little bit
* into the payload for debugging purposes.
*/
length = checkSMB(smb_buffer, smb_buffer->Mid, total_read);
if (length != 0)
cifs_dump_mem("Bad SMB: ", buf,
min_t(unsigned int, total_read, 48));
server->lstrp = jiffies; if (server->large_buf) {
buf = server->bigbuf;
smb_buffer = (struct smb_hdr *)buf;
}
mid_entry = find_cifs_mid(server, smb_buffer, &length, server->lstrp = jiffies;
isLargeBuf, &isMultiRsp, &bigbuf);
if (mid_entry != NULL) { if (mid_entry != NULL) {
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry); mid_entry->callback(mid_entry);
/* Was previous buf put in mpx struct for multi-rsp? */ } else if (!is_valid_oplock_break(smb_buffer, server)) {
if (!isMultiRsp) {
/* smb buffer will be freed by user thread */
if (isLargeBuf)
bigbuf = NULL;
else
smallbuf = NULL;
}
} else if (length != 0) {
/* response sanity checks failed */
continue;
} else if (!is_valid_oplock_break(smb_buffer, server) &&
!isMultiRsp) {
cERROR(1, "No task to wake, unknown frame received! " cERROR(1, "No task to wake, unknown frame received! "
"NumMids %d", atomic_read(&midCount)); "NumMids %d", atomic_read(&midCount));
cifs_dump_mem("Received Data is: ", buf, cifs_dump_mem("Received Data is: ", buf,
...@@ -773,9 +855,9 @@ cifs_demultiplex_thread(void *p) ...@@ -773,9 +855,9 @@ cifs_demultiplex_thread(void *p)
} /* end while !EXITING */ } /* end while !EXITING */
/* buffer usually freed in free_mid - need to free it here on exit */ /* buffer usually freed in free_mid - need to free it here on exit */
cifs_buf_release(bigbuf); cifs_buf_release(server->bigbuf);
if (smallbuf) /* no sense logging a debug message if NULL */ if (server->smallbuf) /* no sense logging a debug message if NULL */
cifs_small_buf_release(smallbuf); cifs_small_buf_release(server->smallbuf);
task_to_wake = xchg(&server->tsk, NULL); task_to_wake = xchg(&server->tsk, NULL);
clean_demultiplex_info(server); clean_demultiplex_info(server);
...@@ -827,6 +909,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, ...@@ -827,6 +909,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
{ {
char *value, *data, *end; char *value, *data, *end;
char *mountdata_copy = NULL, *options; char *mountdata_copy = NULL, *options;
int err;
unsigned int temp_len, i, j; unsigned int temp_len, i, j;
char separator[2]; char separator[2];
short int override_uid = -1; short int override_uid = -1;
...@@ -883,6 +966,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, ...@@ -883,6 +966,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
cFYI(1, "Null separator not allowed"); cFYI(1, "Null separator not allowed");
} }
} }
vol->backupuid_specified = false; /* no backup intent for a user */
vol->backupgid_specified = false; /* no backup intent for a group */
while ((data = strsep(&options, separator)) != NULL) { while ((data = strsep(&options, separator)) != NULL) {
if (!*data) if (!*data)
...@@ -1442,6 +1527,22 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, ...@@ -1442,6 +1527,22 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
vol->mfsymlinks = true; vol->mfsymlinks = true;
} else if (strnicmp(data, "multiuser", 8) == 0) { } else if (strnicmp(data, "multiuser", 8) == 0) {
vol->multiuser = true; vol->multiuser = true;
} else if (!strnicmp(data, "backupuid", 9) && value && *value) {
err = kstrtouint(value, 0, &vol->backupuid);
if (err < 0) {
cERROR(1, "%s: Invalid backupuid value",
__func__);
goto cifs_parse_mount_err;
}
vol->backupuid_specified = true;
} else if (!strnicmp(data, "backupgid", 9) && value && *value) {
err = kstrtouint(value, 0, &vol->backupgid);
if (err < 0) {
cERROR(1, "%s: Invalid backupgid value",
__func__);
goto cifs_parse_mount_err;
}
vol->backupgid_specified = true;
} else } else
printk(KERN_WARNING "CIFS: Unknown mount option %s\n", printk(KERN_WARNING "CIFS: Unknown mount option %s\n",
data); data);
...@@ -2209,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) ...@@ -2209,16 +2310,16 @@ compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data)
(new->mnt_cifs_flags & CIFS_MOUNT_MASK)) (new->mnt_cifs_flags & CIFS_MOUNT_MASK))
return 0; return 0;
if (old->rsize != new->rsize)
return 0;
/* /*
* We want to share sb only if we don't specify wsize or specified wsize * We want to share sb only if we don't specify an r/wsize or
* is greater or equal than existing one. * specified r/wsize is greater than or equal to existing one.
*/ */
if (new->wsize && new->wsize < old->wsize) if (new->wsize && new->wsize < old->wsize)
return 0; return 0;
if (new->rsize && new->rsize < old->rsize)
return 0;
if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid) if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid)
return 0; return 0;
...@@ -2656,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, ...@@ -2656,14 +2757,6 @@ void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon,
CIFS_MOUNT_POSIX_PATHS; CIFS_MOUNT_POSIX_PATHS;
} }
if (cifs_sb && (cifs_sb->rsize > 127 * 1024)) {
if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) {
cifs_sb->rsize = 127 * 1024;
cFYI(DBG2, "larger reads not supported by srv");
}
}
cFYI(1, "Negotiate caps 0x%x", (int)cap); cFYI(1, "Negotiate caps 0x%x", (int)cap);
#ifdef CONFIG_CIFS_DEBUG2 #ifdef CONFIG_CIFS_DEBUG2
if (cap & CIFS_UNIX_FCNTL_CAP) if (cap & CIFS_UNIX_FCNTL_CAP)
...@@ -2708,31 +2801,19 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ...@@ -2708,31 +2801,19 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
spin_lock_init(&cifs_sb->tlink_tree_lock); spin_lock_init(&cifs_sb->tlink_tree_lock);
cifs_sb->tlink_tree = RB_ROOT; cifs_sb->tlink_tree = RB_ROOT;
if (pvolume_info->rsize > CIFSMaxBufSize) {
cERROR(1, "rsize %d too large, using MaxBufSize",
pvolume_info->rsize);
cifs_sb->rsize = CIFSMaxBufSize;
} else if ((pvolume_info->rsize) &&
(pvolume_info->rsize <= CIFSMaxBufSize))
cifs_sb->rsize = pvolume_info->rsize;
else /* default */
cifs_sb->rsize = CIFSMaxBufSize;
if (cifs_sb->rsize < 2048) {
cifs_sb->rsize = 2048;
/* Windows ME may prefer this */
cFYI(1, "readsize set to minimum: 2048");
}
/* /*
* Temporarily set wsize for matching superblock. If we end up using * Temporarily set r/wsize for matching superblock. If we end up using
* new sb then cifs_negotiate_wsize will later negotiate it downward * new sb then client will later negotiate it downward if needed.
* if needed.
*/ */
cifs_sb->rsize = pvolume_info->rsize;
cifs_sb->wsize = pvolume_info->wsize; cifs_sb->wsize = pvolume_info->wsize;
cifs_sb->mnt_uid = pvolume_info->linux_uid; cifs_sb->mnt_uid = pvolume_info->linux_uid;
cifs_sb->mnt_gid = pvolume_info->linux_gid; cifs_sb->mnt_gid = pvolume_info->linux_gid;
if (pvolume_info->backupuid_specified)
cifs_sb->mnt_backupuid = pvolume_info->backupuid;
if (pvolume_info->backupgid_specified)
cifs_sb->mnt_backupgid = pvolume_info->backupgid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode; cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode; cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
cFYI(1, "file mode: 0x%x dir mode: 0x%x", cFYI(1, "file mode: 0x%x dir mode: 0x%x",
...@@ -2763,6 +2844,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ...@@ -2763,6 +2844,10 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD;
if (pvolume_info->cifs_acl) if (pvolume_info->cifs_acl)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL;
if (pvolume_info->backupuid_specified)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPUID;
if (pvolume_info->backupgid_specified)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_BACKUPGID;
if (pvolume_info->override_uid) if (pvolume_info->override_uid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID; cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
if (pvolume_info->override_gid) if (pvolume_info->override_gid)
...@@ -2795,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, ...@@ -2795,29 +2880,41 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
} }
/* /*
* When the server supports very large writes via POSIX extensions, we can * When the server supports very large reads and writes via POSIX extensions,
* allow up to 2^24-1, minus the size of a WRITE_AND_X header, not including * we can allow up to 2^24-1, minus the size of a READ/WRITE_AND_X header, not
* the RFC1001 length. * including the RFC1001 length.
* *
* Note that this might make for "interesting" allocation problems during * Note that this might make for "interesting" allocation problems during
* writeback however as we have to allocate an array of pointers for the * writeback however as we have to allocate an array of pointers for the
* pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. * pages. A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096.
*
* For reads, there is a similar problem as we need to allocate an array
* of kvecs to handle the receive, though that should only need to be done
* once.
*/ */
#define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4) #define CIFS_MAX_WSIZE ((1<<24) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RSIZE ((1<<24) - sizeof(READ_RSP) + 4)
/* /*
* When the server doesn't allow large posix writes, only allow a wsize of * When the server doesn't allow large posix writes, only allow a rsize/wsize
* 128k minus the size of the WRITE_AND_X header. That allows for a write up * of 2^17-1 minus the size of the call header. That allows for a read or
* to the maximum size described by RFC1002. * write up to the maximum size described by RFC1002.
*/ */
#define CIFS_MAX_RFC1002_WSIZE (128 * 1024 - sizeof(WRITE_REQ) + 4) #define CIFS_MAX_RFC1002_WSIZE ((1<<17) - 1 - sizeof(WRITE_REQ) + 4)
#define CIFS_MAX_RFC1002_RSIZE ((1<<17) - 1 - sizeof(READ_RSP) + 4)
/* /*
* The default wsize is 1M. find_get_pages seems to return a maximum of 256 * The default wsize is 1M. find_get_pages seems to return a maximum of 256
* pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill
* a single wsize request with a single call. * a single wsize request with a single call.
*/ */
#define CIFS_DEFAULT_WSIZE (1024 * 1024) #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
/*
* Windows only supports a max of 60k reads. Default to that when posix
* extensions aren't in force.
*/
#define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
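
Editorial arithmetic behind the two comments above, assuming 4 KB pages and 8-byte pointers:

/*
 *   16 MB write:  (1<<24) / 4096 = 4096 pages, and 4096 page pointers
 *                 at 8 bytes each is the ~32 KB pointer array.
 *   1 MB default: (1024 * 1024) / 4096 = 256 pages, i.e. exactly one
 *                 find_get_pages() batch, so a full-sized request can be
 *                 gathered with a single call.
 */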
static unsigned int static unsigned int
cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
...@@ -2825,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) ...@@ -2825,7 +2922,7 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server; struct TCP_Server_Info *server = tcon->ses->server;
unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize : unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
CIFS_DEFAULT_WSIZE; CIFS_DEFAULT_IOSIZE;
/* can server support 24-bit write sizes? (via UNIX extensions) */ /* can server support 24-bit write sizes? (via UNIX extensions) */
if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
...@@ -2848,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) ...@@ -2848,6 +2945,50 @@ cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
return wsize; return wsize;
} }
static unsigned int
cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
{
__u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
struct TCP_Server_Info *server = tcon->ses->server;
unsigned int rsize, defsize;
/*
* Set default value...
*
* HACK alert! Ancient servers have very small buffers. Even though
* MS-CIFS indicates that servers are only limited by the client's
* bufsize for reads, testing against win98se shows that it throws
* INVALID_PARAMETER errors if you try to request too large a read.
*
* If the server advertises a MaxBufferSize of less than one page,
* assume that it also can't satisfy reads larger than that either.
*
* FIXME: Is there a better heuristic for this?
*/
if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP))
defsize = CIFS_DEFAULT_IOSIZE;
else if (server->capabilities & CAP_LARGE_READ_X)
defsize = CIFS_DEFAULT_NON_POSIX_RSIZE;
else if (server->maxBuf >= PAGE_CACHE_SIZE)
defsize = CIFSMaxBufSize;
else
defsize = server->maxBuf - sizeof(READ_RSP);
rsize = pvolume_info->rsize ? pvolume_info->rsize : defsize;
/*
* no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to
* the client's MaxBufferSize.
*/
if (!(server->capabilities & CAP_LARGE_READ_X))
rsize = min_t(unsigned int, CIFSMaxBufSize, rsize);
/* hard limit of CIFS_MAX_RSIZE */
rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE);
return rsize;
}
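
Editorial examples, not from this patch, of how the defaults above resolve for typical servers when no rsize= option is given:

/*
 *   - Unix extensions with CIFS_UNIX_LARGE_READ_CAP: default rsize is
 *     CIFS_DEFAULT_IOSIZE (1 MB), subject to the CIFS_MAX_RSIZE hard limit
 *     (and to CIFSMaxBufSize if CAP_LARGE_READ_X is not also advertised).
 *   - CAP_LARGE_READ_X without POSIX large reads: default rsize is
 *     CIFS_DEFAULT_NON_POSIX_RSIZE (60 KB).
 *   - No CAP_LARGE_READ_X and MaxBufferSize below one page: the default
 *     drops to maxBuf - sizeof(READ_RSP), and any user-supplied rsize is
 *     clamped to CIFSMaxBufSize.
 */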
static int static int
is_path_accessible(int xid, struct cifs_tcon *tcon, is_path_accessible(int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path) struct cifs_sb_info *cifs_sb, const char *full_path)
...@@ -3041,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname) ...@@ -3041,6 +3182,22 @@ cifs_get_volume_info(char *mount_data, const char *devname)
return volume_info; return volume_info;
} }
/* make sure ra_pages is a multiple of rsize */
static inline unsigned int
cifs_ra_pages(struct cifs_sb_info *cifs_sb)
{
unsigned int reads;
unsigned int rsize_pages = cifs_sb->rsize / PAGE_CACHE_SIZE;
if (rsize_pages >= default_backing_dev_info.ra_pages)
return default_backing_dev_info.ra_pages;
else if (rsize_pages == 0)
return rsize_pages;
reads = default_backing_dev_info.ra_pages / rsize_pages;
return reads * rsize_pages;
}
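
Editorial worked example of the rounding above, assuming 4 KB pages and the stock 32-page default readahead window:

/*
 *   rsize = 60 KB  =>  rsize_pages = 61440 / 4096 = 15
 *   reads = 32 / 15 = 2
 *   ra_pages = 2 * 15 = 30 pages, i.e. the readahead window is rounded
 *   down to a whole number of rsize-sized reads.
 */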
int int
cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
{ {
...@@ -3059,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) ...@@ -3059,8 +3216,6 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
if (rc) if (rc)
return rc; return rc;
cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages;
#ifdef CONFIG_CIFS_DFS_UPCALL #ifdef CONFIG_CIFS_DFS_UPCALL
try_mount_again: try_mount_again:
/* cleanup activities if we're chasing a referral */ /* cleanup activities if we're chasing a referral */
...@@ -3125,15 +3280,11 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info) ...@@ -3125,15 +3280,11 @@ cifs_mount(struct cifs_sb_info *cifs_sb, struct smb_vol *volume_info)
CIFSSMBQFSAttributeInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon);
} }
if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) {
cifs_sb->rsize = 1024 * 127;
cFYI(DBG2, "no very large read support, rsize now 127K");
}
if (!(tcon->ses->capabilities & CAP_LARGE_READ_X))
cifs_sb->rsize = min(cifs_sb->rsize,
(tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE));
cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info); cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info);
cifs_sb->rsize = cifs_negotiate_rsize(tcon, volume_info);
/* tune readahead according to rsize */
cifs_sb->bdi.ra_pages = cifs_ra_pages(cifs_sb);
remote_path_check: remote_path_check:
#ifdef CONFIG_CIFS_DFS_UPCALL #ifdef CONFIG_CIFS_DFS_UPCALL
......
...@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, ...@@ -171,7 +171,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
} }
tcon = tlink_tcon(tlink); tcon = tlink_tcon(tlink);
if (oplockEnabled) if (enable_oplocks)
oplock = REQ_OPLOCK; oplock = REQ_OPLOCK;
if (nd) if (nd)
...@@ -244,6 +244,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, ...@@ -244,6 +244,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (!tcon->unix_ext && (mode & S_IWUGO) == 0) if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY; create_options |= CREATE_OPTION_READONLY;
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
if (tcon->ses->capabilities & CAP_NT_SMBS) if (tcon->ses->capabilities & CAP_NT_SMBS)
rc = CIFSSMBOpen(xid, tcon, full_path, disposition, rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options, desiredAccess, create_options,
...@@ -357,6 +360,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, ...@@ -357,6 +360,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
{ {
int rc = -EPERM; int rc = -EPERM;
int xid; int xid;
int create_options = CREATE_NOT_DIR | CREATE_OPTION_SPECIAL;
struct cifs_sb_info *cifs_sb; struct cifs_sb_info *cifs_sb;
struct tcon_link *tlink; struct tcon_link *tlink;
struct cifs_tcon *pTcon; struct cifs_tcon *pTcon;
...@@ -431,9 +435,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, ...@@ -431,9 +435,11 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode,
return rc; return rc;
} }
/* FIXME: would WRITE_OWNER | WRITE_DAC be better? */ if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE, rc = CIFSSMBOpen(xid, pTcon, full_path, FILE_CREATE,
GENERIC_WRITE, CREATE_NOT_DIR | CREATE_OPTION_SPECIAL, GENERIC_WRITE, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls, &fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) if (rc)
...@@ -642,9 +648,17 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd) ...@@ -642,9 +648,17 @@ cifs_d_revalidate(struct dentry *direntry, struct nameidata *nd)
if (direntry->d_inode) { if (direntry->d_inode) {
if (cifs_revalidate_dentry(direntry)) if (cifs_revalidate_dentry(direntry))
return 0; return 0;
else else {
/*
* Forcibly invalidate automounting directory inodes
* (remote DFS directories) so that they get
* instantiated again for automount
*/
if (IS_AUTOMOUNT(direntry->d_inode))
return 0;
return 1; return 1;
} }
}
/* /*
* This may be nfsd (or something), anyway, we can't see the * This may be nfsd (or something), anyway, we can't see the
......
...@@ -45,7 +45,7 @@ ...@@ -45,7 +45,7 @@
#include "cifs_debug.h" #include "cifs_debug.h"
#include "cifsfs.h" #include "cifsfs.h"
#ifdef CIFS_NFSD_EXPORT #ifdef CONFIG_CIFS_NFSD_EXPORT
static struct dentry *cifs_get_parent(struct dentry *dentry) static struct dentry *cifs_get_parent(struct dentry *dentry)
{ {
/* BB need to add code here eventually to enable export via NFSD */ /* BB need to add code here eventually to enable export via NFSD */
...@@ -63,5 +63,5 @@ const struct export_operations cifs_export_ops = { ...@@ -63,5 +63,5 @@ const struct export_operations cifs_export_ops = {
.encode_fs = */ .encode_fs = */
}; };
#endif /* CIFS_NFSD_EXPORT */ #endif /* CONFIG_CIFS_NFSD_EXPORT */
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/mount.h> #include <linux/mount.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h> #include <asm/div64.h>
#include "cifsfs.h" #include "cifsfs.h"
#include "cifspdu.h" #include "cifspdu.h"
...@@ -174,6 +175,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, ...@@ -174,6 +175,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
int rc; int rc;
int desiredAccess; int desiredAccess;
int disposition; int disposition;
int create_options = CREATE_NOT_DIR;
FILE_ALL_INFO *buf; FILE_ALL_INFO *buf;
desiredAccess = cifs_convert_flags(f_flags); desiredAccess = cifs_convert_flags(f_flags);
...@@ -210,9 +212,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, ...@@ -210,9 +212,12 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
if (tcon->ses->capabilities & CAP_NT_SMBS) if (tcon->ses->capabilities & CAP_NT_SMBS)
rc = CIFSSMBOpen(xid, tcon, full_path, disposition, rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, CREATE_NOT_DIR, pnetfid, poplock, buf, desiredAccess, create_options, pnetfid, poplock, buf,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags cifs_sb->local_nls, cifs_sb->mnt_cifs_flags
& CIFS_MOUNT_MAP_SPECIAL_CHR); & CIFS_MOUNT_MAP_SPECIAL_CHR);
else else
...@@ -258,8 +263,6 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file, ...@@ -258,8 +263,6 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
pCifsFile->invalidHandle = false; pCifsFile->invalidHandle = false;
pCifsFile->tlink = cifs_get_tlink(tlink); pCifsFile->tlink = cifs_get_tlink(tlink);
mutex_init(&pCifsFile->fh_mutex); mutex_init(&pCifsFile->fh_mutex);
mutex_init(&pCifsFile->lock_mutex);
INIT_LIST_HEAD(&pCifsFile->llist);
INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break); INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
spin_lock(&cifs_file_list_lock); spin_lock(&cifs_file_list_lock);
...@@ -272,11 +275,14 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file, ...@@ -272,11 +275,14 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
spin_unlock(&cifs_file_list_lock); spin_unlock(&cifs_file_list_lock);
cifs_set_oplock_level(pCifsInode, oplock); cifs_set_oplock_level(pCifsInode, oplock);
pCifsInode->can_cache_brlcks = pCifsInode->clientCanCacheAll;
file->private_data = pCifsFile; file->private_data = pCifsFile;
return pCifsFile; return pCifsFile;
} }
static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
/* /*
* Release a reference on the file private data. This may involve closing * Release a reference on the file private data. This may involve closing
* the filehandle out on the server. Must be called without holding * the filehandle out on the server. Must be called without holding
...@@ -327,12 +333,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file) ...@@ -327,12 +333,15 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
/* Delete any outstanding lock records. We'll lose them when the file /* Delete any outstanding lock records. We'll lose them when the file
* is closed anyway. * is closed anyway.
*/ */
mutex_lock(&cifs_file->lock_mutex); mutex_lock(&cifsi->lock_mutex);
list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) { list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) {
if (li->netfid != cifs_file->netfid)
continue;
list_del(&li->llist); list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li); kfree(li);
} }
mutex_unlock(&cifs_file->lock_mutex); mutex_unlock(&cifsi->lock_mutex);
cifs_put_tlink(cifs_file->tlink); cifs_put_tlink(cifs_file->tlink);
dput(cifs_file->dentry); dput(cifs_file->dentry);
...@@ -371,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file) ...@@ -371,7 +380,7 @@ int cifs_open(struct inode *inode, struct file *file)
cFYI(1, "inode = 0x%p file flags are 0x%x for %s", cFYI(1, "inode = 0x%p file flags are 0x%x for %s",
inode, file->f_flags, full_path); inode, file->f_flags, full_path);
if (oplockEnabled) if (enable_oplocks)
oplock = REQ_OPLOCK; oplock = REQ_OPLOCK;
else else
oplock = 0; oplock = 0;
...@@ -465,6 +474,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) ...@@ -465,6 +474,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
char *full_path = NULL; char *full_path = NULL;
int desiredAccess; int desiredAccess;
int disposition = FILE_OPEN; int disposition = FILE_OPEN;
int create_options = CREATE_NOT_DIR;
__u16 netfid; __u16 netfid;
xid = GetXid(); xid = GetXid();
...@@ -495,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) ...@@ -495,7 +505,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
cFYI(1, "inode = 0x%p file flags 0x%x for %s", cFYI(1, "inode = 0x%p file flags 0x%x for %s",
inode, pCifsFile->f_flags, full_path); inode, pCifsFile->f_flags, full_path);
if (oplockEnabled) if (enable_oplocks)
oplock = REQ_OPLOCK; oplock = REQ_OPLOCK;
else else
oplock = 0; oplock = 0;
...@@ -524,6 +534,9 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) ...@@ -524,6 +534,9 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
desiredAccess = cifs_convert_flags(pCifsFile->f_flags); desiredAccess = cifs_convert_flags(pCifsFile->f_flags);
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
/* Can not refresh inode by passing in file_info buf to be returned /* Can not refresh inode by passing in file_info buf to be returned
by SMBOpen and then calling get_inode_info with returned buf by SMBOpen and then calling get_inode_info with returned buf
since file might have write behind data that needs to be flushed since file might have write behind data that needs to be flushed
...@@ -531,7 +544,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) ...@@ -531,7 +544,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush)
that inode was not dirty locally we could do this */ that inode was not dirty locally we could do this */
rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess, rc = CIFSSMBOpen(xid, tcon, full_path, disposition, desiredAccess,
CREATE_NOT_DIR, &netfid, &oplock, NULL, create_options, &netfid, &oplock, NULL,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR); CIFS_MOUNT_MAP_SPECIAL_CHR);
if (rc) { if (rc) {
...@@ -631,219 +644,687 @@ int cifs_closedir(struct inode *inode, struct file *file) ...@@ -631,219 +644,687 @@ int cifs_closedir(struct inode *inode, struct file *file)
return rc; return rc;
} }
static int store_file_lock(struct cifsFileInfo *fid, __u64 len, static struct cifsLockInfo *
__u64 offset, __u8 lockType) cifs_lock_init(__u64 len, __u64 offset, __u8 type, __u16 netfid)
{ {
struct cifsLockInfo *li = struct cifsLockInfo *li =
kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
if (li == NULL) if (!li)
return -ENOMEM; return li;
li->netfid = netfid;
li->offset = offset; li->offset = offset;
li->length = len; li->length = len;
li->type = lockType; li->type = type;
mutex_lock(&fid->lock_mutex); li->pid = current->tgid;
list_add(&li->llist, &fid->llist); INIT_LIST_HEAD(&li->blist);
mutex_unlock(&fid->lock_mutex); init_waitqueue_head(&li->block_q);
return li;
}
static void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
struct cifsLockInfo *li, *tmp;
list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
list_del_init(&li->blist);
wake_up(&li->block_q);
}
}
static bool
cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
__u64 length, __u8 type, __u16 netfid,
struct cifsLockInfo **conf_lock)
{
struct cifsLockInfo *li, *tmp;
list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
if (offset + length <= li->offset ||
offset >= li->offset + li->length)
continue;
else if ((type & LOCKING_ANDX_SHARED_LOCK) &&
((netfid == li->netfid && current->tgid == li->pid) ||
type == li->type))
continue;
else {
*conf_lock = li;
return true;
}
}
return false;
}
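
cifs_find_lock_conflict() above skips a cached lock only when the two byte ranges do not overlap at all, or when both locks are compatible shared locks (same fid and owner, or same type). The overlap test itself is the usual half-open-interval check; a small user-space sketch (ranges_overlap() is an illustrative name, not a kernel helper):

#include <stdio.h>
#include <stdbool.h>

/* Two half-open ranges [off, off+len) overlap unless one ends at or
 * before the other begins -- the same test used in the lock search. */
static bool ranges_overlap(unsigned long long off1, unsigned long long len1,
			   unsigned long long off2, unsigned long long len2)
{
	if (off1 + len1 <= off2 || off1 >= off2 + len2)
		return false;
	return true;
}

int main(void)
{
	printf("%d\n", ranges_overlap(0, 10, 10, 5));	/* 0: adjacent only   */
	printf("%d\n", ranges_overlap(0, 10, 9, 5));	/* 1: one byte shared */
	return 0;
}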
static int
cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, struct file_lock *flock)
{
int rc = 0;
struct cifsLockInfo *conf_lock;
bool exist;
mutex_lock(&cinode->lock_mutex);
exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
&conf_lock);
if (exist) {
flock->fl_start = conf_lock->offset;
flock->fl_end = conf_lock->offset + conf_lock->length - 1;
flock->fl_pid = conf_lock->pid;
if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK)
flock->fl_type = F_RDLCK;
else
flock->fl_type = F_WRLCK;
} else if (!cinode->can_cache_brlcks)
rc = 1;
else
flock->fl_type = F_UNLCK;
mutex_unlock(&cinode->lock_mutex);
return rc;
}
static int
cifs_lock_add(struct cifsInodeInfo *cinode, __u64 len, __u64 offset,
__u8 type, __u16 netfid)
{
struct cifsLockInfo *li;
li = cifs_lock_init(len, offset, type, netfid);
if (!li)
return -ENOMEM;
mutex_lock(&cinode->lock_mutex);
list_add_tail(&li->llist, &cinode->llist);
mutex_unlock(&cinode->lock_mutex);
return 0; return 0;
} }
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) static int
cifs_lock_add_if(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
__u8 type, __u16 netfid, bool wait)
{ {
int rc, xid; struct cifsLockInfo *lock, *conf_lock;
__u32 numLock = 0; bool exist;
__u32 numUnlock = 0; int rc = 0;
__u64 length;
bool wait_flag = false; lock = cifs_lock_init(length, offset, type, netfid);
struct cifs_sb_info *cifs_sb; if (!lock)
struct cifs_tcon *tcon; return -ENOMEM;
__u16 netfid;
__u8 lockType = LOCKING_ANDX_LARGE_FILES; try_again:
bool posix_locking = 0; exist = false;
mutex_lock(&cinode->lock_mutex);
length = 1 + pfLock->fl_end - pfLock->fl_start; exist = cifs_find_lock_conflict(cinode, offset, length, type, netfid,
&conf_lock);
if (!exist && cinode->can_cache_brlcks) {
list_add_tail(&lock->llist, &cinode->llist);
mutex_unlock(&cinode->lock_mutex);
return rc;
}
if (!exist)
rc = 1;
else if (!wait)
rc = -EACCES; rc = -EACCES;
else {
list_add_tail(&lock->blist, &conf_lock->blist);
mutex_unlock(&cinode->lock_mutex);
rc = wait_event_interruptible(lock->block_q,
(lock->blist.prev == &lock->blist) &&
(lock->blist.next == &lock->blist));
if (!rc)
goto try_again;
else {
mutex_lock(&cinode->lock_mutex);
list_del_init(&lock->blist);
mutex_unlock(&cinode->lock_mutex);
}
}
kfree(lock);
mutex_unlock(&cinode->lock_mutex);
return rc;
}
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
int rc = 0;
struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
unsigned char saved_type = flock->fl_type;
mutex_lock(&cinode->lock_mutex);
posix_test_lock(file, flock);
if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
flock->fl_type = saved_type;
rc = 1;
}
mutex_unlock(&cinode->lock_mutex);
return rc;
}
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
int rc;
mutex_lock(&cinode->lock_mutex);
if (!cinode->can_cache_brlcks) {
mutex_unlock(&cinode->lock_mutex);
return 1;
}
rc = posix_lock_file_wait(file, flock);
mutex_unlock(&cinode->lock_mutex);
return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
int xid, rc = 0, stored_rc;
struct cifsLockInfo *li, *tmp;
struct cifs_tcon *tcon;
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
unsigned int num, max_num;
LOCKING_ANDX_RANGE *buf, *cur;
int types[] = {LOCKING_ANDX_LARGE_FILES,
LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
int i;
xid = GetXid();
tcon = tlink_tcon(cfile->tlink);
mutex_lock(&cinode->lock_mutex);
if (!cinode->can_cache_brlcks) {
mutex_unlock(&cinode->lock_mutex);
FreeXid(xid);
return rc;
}
max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
if (!buf) {
mutex_unlock(&cinode->lock_mutex);
FreeXid(xid);
return rc;
}
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
if (li->type != types[i])
continue;
cur->Pid = cpu_to_le16(li->pid);
cur->LengthLow = cpu_to_le32((u32)li->length);
cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
cur->OffsetLow = cpu_to_le32((u32)li->offset);
cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
li->type, 0, num, buf);
if (stored_rc)
rc = stored_rc;
cur = buf;
num = 0;
} else
cur++;
}
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
types[i], 0, num, buf);
if (stored_rc)
rc = stored_rc;
}
}
cinode->can_cache_brlcks = false;
mutex_unlock(&cinode->lock_mutex);
kfree(buf);
FreeXid(xid);
return rc;
}
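
cifs_push_mandatory_locks() above flushes the cached brlocks in batches: max_num is how many LOCKING_ANDX_RANGE entries fit in one SMB buffer, and a request is sent whenever the scratch buffer fills, plus once more for any remainder. The batching pattern in isolation, with send_batch() standing in for the cifs_lockv() call:

#include <stdio.h>

/* send_batch() stands in for the LOCKING_ANDX request. */
static void send_batch(const unsigned int *ranges, unsigned int num)
{
	printf("sending %u range(s), first=%u\n", num, ranges[0]);
}

/* Fill a buffer with up to max_num lock ranges, "send" it when full,
 * and flush whatever is left at the end. */
static void push_ranges(const unsigned int *ranges, unsigned int total,
			unsigned int max_num, unsigned int *buf)
{
	unsigned int i, num = 0;

	for (i = 0; i < total; i++) {
		buf[num++] = ranges[i];
		if (num == max_num) {
			send_batch(buf, num);
			num = 0;
		}
	}
	if (num)
		send_batch(buf, num);
}

int main(void)
{
	unsigned int ranges[7] = { 1, 2, 3, 4, 5, 6, 7 };
	unsigned int buf[3];

	push_ranges(ranges, 7, 3, buf);	/* sends 3 + 3 + 1 */
	return 0;
}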
/* copied from fs/locks.c with a name change */
#define cifs_for_each_lock(inode, lockp) \
for (lockp = &inode->i_flock; *lockp != NULL; \
lockp = &(*lockp)->fl_next)
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct file_lock *flock, **before;
struct cifsLockInfo *lck, *tmp;
int rc = 0, xid, type;
__u64 length;
struct list_head locks_to_send;
xid = GetXid(); xid = GetXid();
cFYI(1, "Lock parm: 0x%x flockflags: " mutex_lock(&cinode->lock_mutex);
"0x%x flocktype: 0x%x start: %lld end: %lld", if (!cinode->can_cache_brlcks) {
cmd, pfLock->fl_flags, pfLock->fl_type, pfLock->fl_start, mutex_unlock(&cinode->lock_mutex);
pfLock->fl_end); FreeXid(xid);
return rc;
}
INIT_LIST_HEAD(&locks_to_send);
lock_flocks();
cifs_for_each_lock(cfile->dentry->d_inode, before) {
flock = *before;
length = 1 + flock->fl_end - flock->fl_start;
if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
type = CIFS_RDLCK;
else
type = CIFS_WRLCK;
lck = cifs_lock_init(length, flock->fl_start, type,
cfile->netfid);
if (!lck) {
rc = -ENOMEM;
goto send_locks;
}
lck->pid = flock->fl_pid;
if (pfLock->fl_flags & FL_POSIX) list_add_tail(&lck->llist, &locks_to_send);
}
send_locks:
unlock_flocks();
list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
struct file_lock tmp_lock;
int stored_rc;
tmp_lock.fl_start = lck->offset;
stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
0, lck->length, &tmp_lock,
lck->type, 0);
if (stored_rc)
rc = stored_rc;
list_del(&lck->llist);
kfree(lck);
}
cinode->can_cache_brlcks = false;
mutex_unlock(&cinode->lock_mutex);
FreeXid(xid);
return rc;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
if ((tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
return cifs_push_posix_locks(cfile);
return cifs_push_mandatory_locks(cfile);
}
static void
cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
bool *wait_flag)
{
if (flock->fl_flags & FL_POSIX)
cFYI(1, "Posix"); cFYI(1, "Posix");
if (pfLock->fl_flags & FL_FLOCK) if (flock->fl_flags & FL_FLOCK)
cFYI(1, "Flock"); cFYI(1, "Flock");
if (pfLock->fl_flags & FL_SLEEP) { if (flock->fl_flags & FL_SLEEP) {
cFYI(1, "Blocking lock"); cFYI(1, "Blocking lock");
wait_flag = true; *wait_flag = true;
} }
if (pfLock->fl_flags & FL_ACCESS) if (flock->fl_flags & FL_ACCESS)
cFYI(1, "Process suspended by mandatory locking - " cFYI(1, "Process suspended by mandatory locking - "
"not implemented yet"); "not implemented yet");
if (pfLock->fl_flags & FL_LEASE) if (flock->fl_flags & FL_LEASE)
cFYI(1, "Lease on file - not implemented yet"); cFYI(1, "Lease on file - not implemented yet");
if (pfLock->fl_flags & if (flock->fl_flags &
(~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE))) (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
cFYI(1, "Unknown lock flags 0x%x", pfLock->fl_flags); cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
if (pfLock->fl_type == F_WRLCK) { *type = LOCKING_ANDX_LARGE_FILES;
if (flock->fl_type == F_WRLCK) {
cFYI(1, "F_WRLCK "); cFYI(1, "F_WRLCK ");
numLock = 1; *lock = 1;
} else if (pfLock->fl_type == F_UNLCK) { } else if (flock->fl_type == F_UNLCK) {
cFYI(1, "F_UNLCK"); cFYI(1, "F_UNLCK");
numUnlock = 1; *unlock = 1;
/* Check if unlock includes more than /* Check if unlock includes more than one lock range */
one lock range */ } else if (flock->fl_type == F_RDLCK) {
} else if (pfLock->fl_type == F_RDLCK) {
cFYI(1, "F_RDLCK"); cFYI(1, "F_RDLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK; *type |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1; *lock = 1;
} else if (pfLock->fl_type == F_EXLCK) { } else if (flock->fl_type == F_EXLCK) {
cFYI(1, "F_EXLCK"); cFYI(1, "F_EXLCK");
numLock = 1; *lock = 1;
} else if (pfLock->fl_type == F_SHLCK) { } else if (flock->fl_type == F_SHLCK) {
cFYI(1, "F_SHLCK"); cFYI(1, "F_SHLCK");
lockType |= LOCKING_ANDX_SHARED_LOCK; *type |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1; *lock = 1;
} else } else
cFYI(1, "Unknown type of lock"); cFYI(1, "Unknown type of lock");
}
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); static int
tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink); cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
netfid = ((struct cifsFileInfo *)file->private_data)->netfid; bool wait_flag, bool posix_lck, int xid)
{
int rc = 0;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
__u16 netfid = cfile->netfid;
if ((tcon->ses->capabilities & CAP_UNIX) && if (posix_lck) {
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_locking = 1;
/* BB add code here to normalize offset and length to
account for negative length which we can not accept over the
wire */
if (IS_GETLK(cmd)) {
if (posix_locking) {
int posix_lock_type; int posix_lock_type;
if (lockType & LOCKING_ANDX_SHARED_LOCK)
rc = cifs_posix_lock_test(file, flock);
if (!rc)
return rc;
if (type & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK; posix_lock_type = CIFS_RDLCK;
else else
posix_lock_type = CIFS_WRLCK; posix_lock_type = CIFS_WRLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
length, pfLock, posix_lock_type, 1 /* get */, length, flock,
wait_flag); posix_lock_type, wait_flag);
FreeXid(xid);
return rc; return rc;
} }
rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid,
flock);
if (!rc)
return rc;
/* BB we could chain these into one lock request BB */ /* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, tcon, netfid, length, pfLock->fl_start, rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
0, 1, lockType, 0 /* wait flag */, 0); flock->fl_start, 0, 1, type, 0, 0);
if (rc == 0) { if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, length, rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
pfLock->fl_start, 1 /* numUnlock */ , length, flock->fl_start, 1, 0,
0 /* numLock */ , lockType, type, 0, 0);
0 /* wait flag */, 0); flock->fl_type = F_UNLCK;
pfLock->fl_type = F_UNLCK;
if (rc != 0) if (rc != 0)
cERROR(1, "Error unlocking previously locked " cERROR(1, "Error unlocking previously locked "
"range %d during test of lock", rc); "range %d during test of lock", rc);
rc = 0; rc = 0;
return rc;
}
} else { if (type & LOCKING_ANDX_SHARED_LOCK) {
/* if rc == ERR_SHARING_VIOLATION ? */ flock->fl_type = F_WRLCK;
rc = 0; rc = 0;
return rc;
}
if (lockType & LOCKING_ANDX_SHARED_LOCK) { rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
pfLock->fl_type = F_WRLCK; flock->fl_start, 0, 1,
} else { type | LOCKING_ANDX_SHARED_LOCK, 0, 0);
rc = CIFSSMBLock(xid, tcon, netfid, length,
pfLock->fl_start, 0, 1,
lockType | LOCKING_ANDX_SHARED_LOCK,
0 /* wait flag */, 0);
if (rc == 0) { if (rc == 0) {
rc = CIFSSMBLock(xid, tcon, netfid, rc = CIFSSMBLock(xid, tcon, netfid, current->tgid,
length, pfLock->fl_start, 1, 0, length, flock->fl_start, 1, 0,
lockType | type | LOCKING_ANDX_SHARED_LOCK,
LOCKING_ANDX_SHARED_LOCK, 0, 0);
0 /* wait flag */, 0); flock->fl_type = F_RDLCK;
pfLock->fl_type = F_RDLCK;
if (rc != 0) if (rc != 0)
cERROR(1, "Error unlocking " cERROR(1, "Error unlocking previously locked "
"previously locked range %d " "range %d during test of lock", rc);
"during test of lock", rc); } else
flock->fl_type = F_WRLCK;
rc = 0; rc = 0;
return rc;
}
static void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
struct list_head *li, *tmp;
list_for_each_safe(li, tmp, source)
list_move(li, dest);
}
static void
cifs_free_llist(struct list_head *llist)
{
struct cifsLockInfo *li, *tmp;
list_for_each_entry_safe(li, tmp, llist, llist) {
cifs_del_lock_waiters(li);
list_del(&li->llist);
kfree(li);
}
}
static int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
{
int rc = 0, stored_rc;
int types[] = {LOCKING_ANDX_LARGE_FILES,
LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
unsigned int i;
unsigned int max_num, num;
LOCKING_ANDX_RANGE *buf, *cur;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
struct cifsLockInfo *li, *tmp;
__u64 length = 1 + flock->fl_end - flock->fl_start;
struct list_head tmp_llist;
INIT_LIST_HEAD(&tmp_llist);
max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) /
sizeof(LOCKING_ANDX_RANGE);
buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&cinode->lock_mutex);
for (i = 0; i < 2; i++) {
cur = buf;
num = 0;
list_for_each_entry_safe(li, tmp, &cinode->llist, llist) {
if (flock->fl_start > li->offset ||
(flock->fl_start + length) <
(li->offset + li->length))
continue;
if (current->tgid != li->pid)
continue;
if (cfile->netfid != li->netfid)
continue;
if (types[i] != li->type)
continue;
if (!cinode->can_cache_brlcks) {
cur->Pid = cpu_to_le16(li->pid);
cur->LengthLow = cpu_to_le32((u32)li->length);
cur->LengthHigh =
cpu_to_le32((u32)(li->length>>32));
cur->OffsetLow = cpu_to_le32((u32)li->offset);
cur->OffsetHigh =
cpu_to_le32((u32)(li->offset>>32));
/*
* We need to save a lock here to let us add
* it again to the inode list if the unlock
* range request fails on the server.
*/
list_move(&li->llist, &tmp_llist);
if (++num == max_num) {
stored_rc = cifs_lockv(xid, tcon,
cfile->netfid,
li->type, num,
0, buf);
if (stored_rc) {
/*
* We failed on the unlock range
* request - add all locks from
* the tmp list to the head of
* the inode list.
*/
cifs_move_llist(&tmp_llist,
&cinode->llist);
rc = stored_rc;
} else
/*
* The unlock range request
* succeeded - free the tmp list.
*/
cifs_free_llist(&tmp_llist);
cur = buf;
num = 0;
} else
cur++;
} else { } else {
pfLock->fl_type = F_WRLCK; /*
rc = 0; * We can cache brlock requests - simply remove
* a lock from the inode list.
*/
list_del(&li->llist);
cifs_del_lock_waiters(li);
kfree(li);
} }
} }
if (num) {
stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
types[i], num, 0, buf);
if (stored_rc) {
cifs_move_llist(&tmp_llist, &cinode->llist);
rc = stored_rc;
} else
cifs_free_llist(&tmp_llist);
}
} }
FreeXid(xid); mutex_unlock(&cinode->lock_mutex);
kfree(buf);
return rc; return rc;
} }
if (!numLock && !numUnlock) { static int
/* if no lock or unlock then nothing cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
to do since we do not know what it is */ bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
FreeXid(xid); {
return -EOPNOTSUPP; int rc = 0;
} __u64 length = 1 + flock->fl_end - flock->fl_start;
struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode);
__u16 netfid = cfile->netfid;
if (posix_locking) { if (posix_lck) {
int posix_lock_type; int posix_lock_type;
if (lockType & LOCKING_ANDX_SHARED_LOCK)
rc = cifs_posix_lock_set(file, flock);
if (!rc || rc < 0)
return rc;
if (type & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK; posix_lock_type = CIFS_RDLCK;
else else
posix_lock_type = CIFS_WRLCK; posix_lock_type = CIFS_WRLCK;
if (numUnlock == 1) if (unlock == 1)
posix_lock_type = CIFS_UNLCK; posix_lock_type = CIFS_UNLCK;
rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, rc = CIFSSMBPosixLock(xid, tcon, netfid, current->tgid,
length, pfLock, posix_lock_type, 0 /* set */, length, flock,
wait_flag); posix_lock_type, wait_flag);
} else { goto out;
struct cifsFileInfo *fid = file->private_data; }
if (numLock) { if (lock) {
rc = CIFSSMBLock(xid, tcon, netfid, length, rc = cifs_lock_add_if(cinode, flock->fl_start, length,
pfLock->fl_start, 0, numLock, lockType, type, netfid, wait_flag);
wait_flag, 0); if (rc < 0)
return rc;
else if (!rc)
goto out;
rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length,
flock->fl_start, 0, 1, type, wait_flag, 0);
if (rc == 0) { if (rc == 0) {
/* For Windows locks we must store them. */ /* For Windows locks we must store them. */
rc = store_file_lock(fid, length, rc = cifs_lock_add(cinode, length, flock->fl_start,
pfLock->fl_start, lockType); type, netfid);
} }
} else if (numUnlock) { } else if (unlock)
/* For each stored lock that this unlock overlaps rc = cifs_unlock_range(cfile, flock, xid);
completely, unlock it. */
int stored_rc = 0;
struct cifsLockInfo *li, *tmp;
rc = 0; out:
mutex_lock(&fid->lock_mutex); if (flock->fl_flags & FL_POSIX)
list_for_each_entry_safe(li, tmp, &fid->llist, llist) { posix_lock_file_wait(file, flock);
if (pfLock->fl_start <= li->offset && return rc;
(pfLock->fl_start + length) >= }
(li->offset + li->length)) {
stored_rc = CIFSSMBLock(xid, tcon, int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
netfid, li->length, {
li->offset, 1, 0, int rc, xid;
li->type, false, 0); int lock = 0, unlock = 0;
if (stored_rc) bool wait_flag = false;
rc = stored_rc; bool posix_lck = false;
else { struct cifs_sb_info *cifs_sb;
list_del(&li->llist); struct cifs_tcon *tcon;
kfree(li); struct cifsInodeInfo *cinode;
} struct cifsFileInfo *cfile;
} __u16 netfid;
} __u8 type;
mutex_unlock(&fid->lock_mutex);
rc = -EACCES;
xid = GetXid();
cFYI(1, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld "
"end: %lld", cmd, flock->fl_flags, flock->fl_type,
flock->fl_start, flock->fl_end);
cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
cfile = (struct cifsFileInfo *)file->private_data;
tcon = tlink_tcon(cfile->tlink);
netfid = cfile->netfid;
cinode = CIFS_I(file->f_path.dentry->d_inode);
if ((tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
posix_lck = true;
/*
* BB add code here to normalize offset and length to account for
* negative length which we can not accept over the wire.
*/
if (IS_GETLK(cmd)) {
rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
FreeXid(xid);
return rc;
} }
if (!lock && !unlock) {
/*
* if no lock or unlock then nothing to do since we do not
* know what it is
*/
FreeXid(xid);
return -EOPNOTSUPP;
} }
if (pfLock->fl_flags & FL_POSIX) rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
posix_lock_file_wait(file, pfLock); xid);
FreeXid(xid); FreeXid(xid);
return rc; return rc;
} }
...@@ -1714,6 +2195,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, ...@@ -1714,6 +2195,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
struct smb_com_read_rsp *pSMBr; struct smb_com_read_rsp *pSMBr;
struct cifs_io_parms io_parms; struct cifs_io_parms io_parms;
char *read_data; char *read_data;
unsigned int rsize;
__u32 pid; __u32 pid;
if (!nr_segs) if (!nr_segs)
...@@ -1726,6 +2208,9 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, ...@@ -1726,6 +2208,9 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
xid = GetXid(); xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
/* FIXME: set up handlers for larger reads and/or convert to async */
rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
open_file = file->private_data; open_file = file->private_data;
pTcon = tlink_tcon(open_file->tlink); pTcon = tlink_tcon(open_file->tlink);
...@@ -1738,7 +2223,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, ...@@ -1738,7 +2223,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
cFYI(1, "attempting read on write only file instance"); cFYI(1, "attempting read on write only file instance");
for (total_read = 0; total_read < len; total_read += bytes_read) { for (total_read = 0; total_read < len; total_read += bytes_read) {
cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize); cur_len = min_t(const size_t, len - total_read, rsize);
rc = -EAGAIN; rc = -EAGAIN;
read_data = NULL; read_data = NULL;
...@@ -1830,6 +2315,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, ...@@ -1830,6 +2315,7 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
unsigned int bytes_read = 0; unsigned int bytes_read = 0;
unsigned int total_read; unsigned int total_read;
unsigned int current_read_size; unsigned int current_read_size;
unsigned int rsize;
struct cifs_sb_info *cifs_sb; struct cifs_sb_info *cifs_sb;
struct cifs_tcon *pTcon; struct cifs_tcon *pTcon;
int xid; int xid;
...@@ -1842,6 +2328,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, ...@@ -1842,6 +2328,9 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
xid = GetXid(); xid = GetXid();
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
/* FIXME: set up handlers for larger reads and/or convert to async */
rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
if (file->private_data == NULL) { if (file->private_data == NULL) {
rc = -EBADF; rc = -EBADF;
FreeXid(xid); FreeXid(xid);
...@@ -1861,14 +2350,14 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, ...@@ -1861,14 +2350,14 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size,
for (total_read = 0, current_offset = read_data; for (total_read = 0, current_offset = read_data;
read_size > total_read; read_size > total_read;
total_read += bytes_read, current_offset += bytes_read) { total_read += bytes_read, current_offset += bytes_read) {
current_read_size = min_t(const int, read_size - total_read, current_read_size = min_t(uint, read_size - total_read, rsize);
cifs_sb->rsize);
/* For windows me and 9x we do not want to request more /* For windows me and 9x we do not want to request more
than it negotiated since it will refuse the read then */ than it negotiated since it will refuse the read then */
if ((pTcon->ses) && if ((pTcon->ses) &&
!(pTcon->ses->capabilities & CAP_LARGE_FILES)) { !(pTcon->ses->capabilities & CAP_LARGE_FILES)) {
current_read_size = min_t(const int, current_read_size, current_read_size = min_t(uint, current_read_size,
pTcon->ses->server->maxBuf - 128); CIFSMaxBufSize);
} }
rc = -EAGAIN; rc = -EAGAIN;
while (rc == -EAGAIN) { while (rc == -EAGAIN) {
...@@ -1957,82 +2446,24 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma) ...@@ -1957,82 +2446,24 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return rc; return rc;
} }
static void cifs_copy_cache_pages(struct address_space *mapping,
struct list_head *pages, int bytes_read, char *data)
{
struct page *page;
char *target;
while (bytes_read > 0) {
if (list_empty(pages))
break;
page = list_entry(pages->prev, struct page, lru);
list_del(&page->lru);
if (add_to_page_cache_lru(page, mapping, page->index,
GFP_KERNEL)) {
page_cache_release(page);
cFYI(1, "Add page cache failed");
data += PAGE_CACHE_SIZE;
bytes_read -= PAGE_CACHE_SIZE;
continue;
}
page_cache_release(page);
target = kmap_atomic(page, KM_USER0);
if (PAGE_CACHE_SIZE > bytes_read) {
memcpy(target, data, bytes_read);
/* zero the tail end of this partial page */
memset(target + bytes_read, 0,
PAGE_CACHE_SIZE - bytes_read);
bytes_read = 0;
} else {
memcpy(target, data, PAGE_CACHE_SIZE);
bytes_read -= PAGE_CACHE_SIZE;
}
kunmap_atomic(target, KM_USER0);
flush_dcache_page(page);
SetPageUptodate(page);
unlock_page(page);
data += PAGE_CACHE_SIZE;
/* add page to FS-Cache */
cifs_readpage_to_fscache(mapping->host, page);
}
return;
}
static int cifs_readpages(struct file *file, struct address_space *mapping, static int cifs_readpages(struct file *file, struct address_space *mapping,
struct list_head *page_list, unsigned num_pages) struct list_head *page_list, unsigned num_pages)
{ {
int rc = -EACCES; int rc;
int xid; struct list_head tmplist;
loff_t offset; struct cifsFileInfo *open_file = file->private_data;
struct page *page; struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
struct cifs_sb_info *cifs_sb; unsigned int rsize = cifs_sb->rsize;
struct cifs_tcon *pTcon; pid_t pid;
unsigned int bytes_read = 0;
unsigned int read_size, i;
char *smb_read_data = NULL;
struct smb_com_read_rsp *pSMBr;
struct cifsFileInfo *open_file;
struct cifs_io_parms io_parms;
int buf_type = CIFS_NO_BUFFER;
__u32 pid;
xid = GetXid(); /*
if (file->private_data == NULL) { * Give up immediately if rsize is too small to read an entire page.
rc = -EBADF; * The VFS will fall back to readpage. We should never reach this
FreeXid(xid); * point however since we set ra_pages to 0 when the rsize is smaller
return rc; * than a cache page.
} */
open_file = file->private_data; if (unlikely(rsize < PAGE_CACHE_SIZE))
cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); return 0;
pTcon = tlink_tcon(open_file->tlink);
/* /*
* Reads as many pages as possible from fscache. Returns -ENOBUFS * Reads as many pages as possible from fscache. Returns -ENOBUFS
...@@ -2041,125 +2472,127 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, ...@@ -2041,125 +2472,127 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
&num_pages); &num_pages);
if (rc == 0) if (rc == 0)
goto read_complete; return rc;
cFYI(DBG2, "rpages: num pages %d", num_pages);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
pid = open_file->pid; pid = open_file->pid;
else else
pid = current->tgid; pid = current->tgid;
for (i = 0; i < num_pages; ) { rc = 0;
unsigned contig_pages; INIT_LIST_HEAD(&tmplist);
struct page *tmp_page;
unsigned long expected_index;
if (list_empty(page_list)) cFYI(1, "%s: file=%p mapping=%p num_pages=%u", __func__, file,
break; mapping, num_pages);
/*
* Start with the page at end of list and move it to private
* list. Do the same with any following pages until we hit
* the rsize limit, hit an index discontinuity, or run out of
* pages. Issue the async read and then start the loop again
* until the list is empty.
*
* Note that list order is important. The page_list is in
* the order of declining indexes. When we put the pages in
* the rdata->pages, then we want them in increasing order.
*/
while (!list_empty(page_list)) {
unsigned int bytes = PAGE_CACHE_SIZE;
unsigned int expected_index;
unsigned int nr_pages = 1;
loff_t offset;
struct page *page, *tpage;
struct cifs_readdata *rdata;
page = list_entry(page_list->prev, struct page, lru); page = list_entry(page_list->prev, struct page, lru);
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
/* count adjacent pages that we will read into */ /*
contig_pages = 0; * Lock the page and put it in the cache. Since no one else
expected_index = * should have access to this page, we're safe to simply set
list_entry(page_list->prev, struct page, lru)->index; * PG_locked without checking it first.
list_for_each_entry_reverse(tmp_page, page_list, lru) { */
if (tmp_page->index == expected_index) { __set_page_locked(page);
contig_pages++; rc = add_to_page_cache_locked(page, mapping,
expected_index++; page->index, GFP_KERNEL);
} else
/* give up if we can't stick it in the cache */
if (rc) {
__clear_page_locked(page);
break; break;
} }
if (contig_pages + i > num_pages)
contig_pages = num_pages - i;
/* for reads over a certain size could initiate async /* move first page to the tmplist */
read ahead */ offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
list_move_tail(&page->lru, &tmplist);
read_size = contig_pages * PAGE_CACHE_SIZE; /* now try and add more pages onto the request */
/* Read size needs to be in multiples of one page */ expected_index = page->index + 1;
read_size = min_t(const unsigned int, read_size, list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
cifs_sb->rsize & PAGE_CACHE_MASK); /* discontinuity ? */
cFYI(DBG2, "rpages: read size 0x%x contiguous pages %d", if (page->index != expected_index)
read_size, contig_pages); break;
rc = -EAGAIN;
while (rc == -EAGAIN) { /* would this page push the read over the rsize? */
if (open_file->invalidHandle) { if (bytes + PAGE_CACHE_SIZE > rsize)
rc = cifs_reopen_file(open_file, true); break;
if (rc != 0)
__set_page_locked(page);
if (add_to_page_cache_locked(page, mapping,
page->index, GFP_KERNEL)) {
__clear_page_locked(page);
break; break;
} }
io_parms.netfid = open_file->netfid; list_move_tail(&page->lru, &tmplist);
io_parms.pid = pid; bytes += PAGE_CACHE_SIZE;
io_parms.tcon = pTcon; expected_index++;
io_parms.offset = offset; nr_pages++;
io_parms.length = read_size;
rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
&smb_read_data, &buf_type);
/* BB more RC checks ? */
if (rc == -EAGAIN) {
if (smb_read_data) {
if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
}
} }
rdata = cifs_readdata_alloc(nr_pages);
if (!rdata) {
/* best to give up if we're out of mem */
list_for_each_entry_safe(page, tpage, &tmplist, lru) {
list_del(&page->lru);
lru_cache_add_file(page);
unlock_page(page);
page_cache_release(page);
} }
if ((rc < 0) || (smb_read_data == NULL)) { rc = -ENOMEM;
cFYI(1, "Read error in readpages: %d", rc);
break; break;
} else if (bytes_read > 0) { }
task_io_account_read(bytes_read);
pSMBr = (struct smb_com_read_rsp *)smb_read_data; spin_lock(&cifs_file_list_lock);
cifs_copy_cache_pages(mapping, page_list, bytes_read, cifsFileInfo_get(open_file);
smb_read_data + 4 /* RFC1001 hdr */ + spin_unlock(&cifs_file_list_lock);
le16_to_cpu(pSMBr->DataOffset)); rdata->cfile = open_file;
rdata->mapping = mapping;
i += bytes_read >> PAGE_CACHE_SHIFT; rdata->offset = offset;
cifs_stats_bytes_read(pTcon, bytes_read); rdata->bytes = bytes;
if ((bytes_read & PAGE_CACHE_MASK) != bytes_read) { rdata->pid = pid;
i++; /* account for partial page */ list_splice_init(&tmplist, &rdata->pages);
/* server copy of file can have smaller size do {
than client */ if (open_file->invalidHandle) {
/* BB do we need to verify this common case ? rc = cifs_reopen_file(open_file, true);
this case is ok - if we are at server EOF if (rc != 0)
we will hit it on next read */ continue;
}
rc = cifs_async_readv(rdata);
} while (rc == -EAGAIN);
/* break; */ if (rc != 0) {
list_for_each_entry_safe(page, tpage, &rdata->pages,
lru) {
list_del(&page->lru);
lru_cache_add_file(page);
unlock_page(page);
page_cache_release(page);
} }
} else { cifs_readdata_free(rdata);
cFYI(1, "No bytes read (%d) at offset %lld . "
"Cleaning remaining pages from readahead list",
bytes_read, offset);
/* BB turn off caching and do new lookup on
file size at server? */
break; break;
} }
if (smb_read_data) {
if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
}
bytes_read = 0;
}
/* need to free smb_read_data buf before exit */
if (smb_read_data) {
if (buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(smb_read_data);
else if (buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(smb_read_data);
smb_read_data = NULL;
} }
read_complete:
FreeXid(xid);
return rc; return rc;
} }
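
The rewritten cifs_readpages() above pulls contiguous pages off the tail of page_list until the next page would push the request past rsize, then issues one async read per batch. A user-space sketch of just that grouping step, assuming 4 KiB pages (batch_pages() is an illustrative name):

#include <stdio.h>

#define EX_PAGE_SIZE 4096u

/* Group an ascending run of page indexes (the kernel builds this by walking
 * page_list from its tail) into contiguous batches of at most rsize bytes. */
static void batch_pages(const unsigned long *idx, unsigned int count,
			unsigned int rsize)
{
	unsigned int i = 0;

	while (i < count) {
		unsigned long start = idx[i];
		unsigned long expected = start + 1;
		unsigned int bytes = EX_PAGE_SIZE;

		i++;
		while (i < count && idx[i] == expected &&
		       bytes + EX_PAGE_SIZE <= rsize) {
			bytes += EX_PAGE_SIZE;
			expected++;
			i++;
		}
		printf("read %u bytes starting at page %lu\n", bytes, start);
	}
}

int main(void)
{
	/* pages 0-4 contiguous, then a gap, then 8-9 */
	unsigned long idx[] = { 0, 1, 2, 3, 4, 8, 9 };

	batch_pages(idx, 7, 3 * EX_PAGE_SIZE);
	return 0;
}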
...@@ -2408,6 +2841,10 @@ void cifs_oplock_break(struct work_struct *work) ...@@ -2408,6 +2841,10 @@ void cifs_oplock_break(struct work_struct *work)
cFYI(1, "Oplock flush inode %p rc %d", inode, rc); cFYI(1, "Oplock flush inode %p rc %d", inode, rc);
} }
rc = cifs_push_locks(cfile);
if (rc)
cERROR(1, "Push locks rc = %d", rc);
/* /*
* releasing stale oplock after recent reconnect of smb session using * releasing stale oplock after recent reconnect of smb session using
* a now incorrect file handle is not a data integrity issue but do * a now incorrect file handle is not a data integrity issue but do
...@@ -2415,8 +2852,9 @@ void cifs_oplock_break(struct work_struct *work) ...@@ -2415,8 +2852,9 @@ void cifs_oplock_break(struct work_struct *work)
* disconnected since oplock already released by the server * disconnected since oplock already released by the server
*/ */
if (!cfile->oplock_break_cancelled) { if (!cfile->oplock_break_cancelled) {
rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid, 0, rc = CIFSSMBLock(0, tlink_tcon(cfile->tlink), cfile->netfid,
0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false, current->tgid, 0, 0, 0, 0,
LOCKING_ANDX_OPLOCK_RELEASE, false,
cinode->clientCanCacheRead ? 1 : 0); cinode->clientCanCacheRead ? 1 : 0);
cFYI(1, "Oplock release rc = %d", rc); cFYI(1, "Oplock release rc = %d", rc);
} }
......
...@@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp) ...@@ -562,7 +562,16 @@ int cifs_get_file_info(struct file *filp)
xid = GetXid(); xid = GetXid();
rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data);
if (rc == -EOPNOTSUPP || rc == -EINVAL) { switch (rc) {
case 0:
cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
break;
case -EREMOTE:
cifs_create_dfs_fattr(&fattr, inode->i_sb);
rc = 0;
break;
case -EOPNOTSUPP:
case -EINVAL:
/* /*
* FIXME: legacy server -- fall back to path-based call? * FIXME: legacy server -- fall back to path-based call?
* for now, just skip revalidating and mark inode for * for now, just skip revalidating and mark inode for
...@@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp) ...@@ -570,18 +579,14 @@ int cifs_get_file_info(struct file *filp)
*/ */
rc = 0; rc = 0;
CIFS_I(inode)->time = 0; CIFS_I(inode)->time = 0;
default:
goto cgfi_exit; goto cgfi_exit;
} else if (rc == -EREMOTE) { }
cifs_create_dfs_fattr(&fattr, inode->i_sb);
rc = 0;
} else if (rc)
goto cgfi_exit;
/* /*
* don't bother with SFU junk here -- just mark inode as needing * don't bother with SFU junk here -- just mark inode as needing
* revalidation. * revalidation.
*/ */
cifs_all_info_to_fattr(&fattr, &find_data, cifs_sb, false);
fattr.cf_uniqueid = CIFS_I(inode)->uniqueid; fattr.cf_uniqueid = CIFS_I(inode)->uniqueid;
fattr.cf_flags |= CIFS_FATTR_NEED_REVAL; fattr.cf_flags |= CIFS_FATTR_NEED_REVAL;
cifs_fattr_to_inode(inode, &fattr); cifs_fattr_to_inode(inode, &fattr);
...@@ -2096,6 +2101,8 @@ static int ...@@ -2096,6 +2101,8 @@ static int
cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
{ {
int xid; int xid;
uid_t uid = NO_CHANGE_32;
gid_t gid = NO_CHANGE_32;
struct inode *inode = direntry->d_inode; struct inode *inode = direntry->d_inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct cifsInodeInfo *cifsInode = CIFS_I(inode); struct cifsInodeInfo *cifsInode = CIFS_I(inode);
...@@ -2146,13 +2153,25 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) ...@@ -2146,13 +2153,25 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
goto cifs_setattr_exit; goto cifs_setattr_exit;
} }
/* if (attrs->ia_valid & ATTR_UID)
* Without unix extensions we can't send ownership changes to the uid = attrs->ia_uid;
* server, so silently ignore them. This is consistent with how
* local DOS/Windows filesystems behave (VFAT, NTFS, etc). With if (attrs->ia_valid & ATTR_GID)
* CIFSACL support + proper Windows to Unix idmapping, we may be gid = attrs->ia_gid;
* able to support this in the future.
*/ #ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
if (uid != NO_CHANGE_32 || gid != NO_CHANGE_32) {
rc = id_mode_to_cifs_acl(inode, full_path, NO_CHANGE_64,
uid, gid);
if (rc) {
cFYI(1, "%s: Setting id failed with error: %d",
__func__, rc);
goto cifs_setattr_exit;
}
}
} else
#endif /* CONFIG_CIFS_ACL */
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID))
attrs->ia_valid &= ~(ATTR_UID | ATTR_GID); attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
...@@ -2161,15 +2180,12 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs) ...@@ -2161,15 +2180,12 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
attrs->ia_valid &= ~ATTR_MODE; attrs->ia_valid &= ~ATTR_MODE;
if (attrs->ia_valid & ATTR_MODE) { if (attrs->ia_valid & ATTR_MODE) {
cFYI(1, "Mode changed to 0%o", attrs->ia_mode);
mode = attrs->ia_mode; mode = attrs->ia_mode;
}
if (attrs->ia_valid & ATTR_MODE) {
rc = 0; rc = 0;
#ifdef CONFIG_CIFS_ACL #ifdef CONFIG_CIFS_ACL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) { if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
rc = mode_to_cifs_acl(inode, full_path, mode); rc = id_mode_to_cifs_acl(inode, full_path, mode,
NO_CHANGE_32, NO_CHANGE_32);
if (rc) { if (rc) {
cFYI(1, "%s: Setting ACL failed with error: %d", cFYI(1, "%s: Setting ACL failed with error: %d",
__func__, rc); __func__, rc);
......
...@@ -183,14 +183,20 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str) ...@@ -183,14 +183,20 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str)
static int static int
CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon, CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
const char *fromName, const char *toName, const char *fromName, const char *toName,
const struct nls_table *nls_codepage, int remap) struct cifs_sb_info *cifs_sb)
{ {
int rc; int rc;
int oplock = 0; int oplock = 0;
int remap;
int create_options = CREATE_NOT_DIR;
__u16 netfid = 0; __u16 netfid = 0;
u8 *buf; u8 *buf;
unsigned int bytes_written = 0; unsigned int bytes_written = 0;
struct cifs_io_parms io_parms; struct cifs_io_parms io_parms;
struct nls_table *nls_codepage;
nls_codepage = cifs_sb->local_nls;
remap = cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR;
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
if (!buf) if (!buf)
...@@ -202,8 +208,11 @@ CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon, ...@@ -202,8 +208,11 @@ CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon,
return rc; return rc;
} }
if (backup_cred(cifs_sb))
create_options |= CREATE_OPEN_BACKUP_INTENT;
rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE, rc = CIFSSMBOpen(xid, tcon, fromName, FILE_CREATE, GENERIC_WRITE,
CREATE_NOT_DIR, &netfid, &oplock, NULL, create_options, &netfid, &oplock, NULL,
nls_codepage, remap); nls_codepage, remap);
if (rc != 0) { if (rc != 0) {
kfree(buf); kfree(buf);
...@@ -559,9 +568,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) ...@@ -559,9 +568,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
/* BB what if DFS and this volume is on different share? BB */ /* BB what if DFS and this volume is on different share? BB */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname, rc = CIFSCreateMFSymLink(xid, pTcon, full_path, symname,
cifs_sb->local_nls, cifs_sb);
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
else if (pTcon->unix_ext) else if (pTcon->unix_ext)
rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname, rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
cifs_sb->local_nls); cifs_sb->local_nls);
......
...@@ -420,19 +420,22 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid) ...@@ -420,19 +420,22 @@ check_smb_hdr(struct smb_hdr *smb, __u16 mid)
} }
int int
checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int total_read)
{ {
__u32 len = be32_to_cpu(smb->smb_buf_length); __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
__u32 clc_len; /* calculated length */ __u32 clc_len; /* calculated length */
cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len); cFYI(0, "checkSMB Length: 0x%x, smb_buf_length: 0x%x",
total_read, rfclen);
if (length < 2 + sizeof(struct smb_hdr)) { /* is this frame too small to even get to a BCC? */
if ((length >= sizeof(struct smb_hdr) - 1) if (total_read < 2 + sizeof(struct smb_hdr)) {
if ((total_read >= sizeof(struct smb_hdr) - 1)
&& (smb->Status.CifsError != 0)) { && (smb->Status.CifsError != 0)) {
/* it's an error return */
smb->WordCount = 0; smb->WordCount = 0;
/* some error cases do not return wct and bcc */ /* some error cases do not return wct and bcc */
return 0; return 0;
} else if ((length == sizeof(struct smb_hdr) + 1) && } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
(smb->WordCount == 0)) { (smb->WordCount == 0)) {
char *tmp = (char *)smb; char *tmp = (char *)smb;
/* Need to work around a bug in two servers here */ /* Need to work around a bug in two servers here */
...@@ -452,39 +455,35 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) ...@@ -452,39 +455,35 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
} else { } else {
cERROR(1, "Length less than smb header size"); cERROR(1, "Length less than smb header size");
} }
return 1; return -EIO;
}
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
cERROR(1, "smb length greater than MaxBufSize, mid=%d",
smb->Mid);
return 1;
} }
/* otherwise, there is enough to get to the BCC */
if (check_smb_hdr(smb, mid)) if (check_smb_hdr(smb, mid))
return 1; return -EIO;
clc_len = smbCalcSize(smb); clc_len = smbCalcSize(smb);
if (4 + len != length) { if (4 + rfclen != total_read) {
cERROR(1, "Length read does not match RFC1001 length %d", cERROR(1, "Length read does not match RFC1001 length %d",
len); rfclen);
return 1; return -EIO;
} }
if (4 + len != clc_len) { if (4 + rfclen != clc_len) {
/* check if bcc wrapped around for large read responses */ /* check if bcc wrapped around for large read responses */
if ((len > 64 * 1024) && (len > clc_len)) { if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
/* check if lengths match mod 64K */ /* check if lengths match mod 64K */
if (((4 + len) & 0xFFFF) == (clc_len & 0xFFFF)) if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
return 0; /* bcc wrapped */ return 0; /* bcc wrapped */
} }
cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u", cFYI(1, "Calculated size %u vs length %u mismatch for mid=%u",
clc_len, 4 + len, smb->Mid); clc_len, 4 + rfclen, smb->Mid);
if (4 + len < clc_len) { if (4 + rfclen < clc_len) {
cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u", cERROR(1, "RFC1001 size %u smaller than SMB for mid=%u",
len, smb->Mid); rfclen, smb->Mid);
return 1; return -EIO;
} else if (len > clc_len + 512) { } else if (rfclen > clc_len + 512) {
/* /*
* Some servers (Windows XP in particular) send more * Some servers (Windows XP in particular) send more
* data than the lengths in the SMB packet would * data than the lengths in the SMB packet would
...@@ -495,8 +494,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length) ...@@ -495,8 +494,8 @@ checkSMB(struct smb_hdr *smb, __u16 mid, unsigned int length)
* data to 512 bytes. * data to 512 bytes.
*/ */
cERROR(1, "RFC1001 size %u more than 512 bytes larger " cERROR(1, "RFC1001 size %u more than 512 bytes larger "
"than SMB for mid=%u", len, smb->Mid); "than SMB for mid=%u", rfclen, smb->Mid);
return 1; return -EIO;
} }
} }
return 0; return 0;
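
checkSMB() now returns -EIO for malformed frames instead of 1. Its length rules boil down to: the bytes actually read must equal the RFC1001 length plus the 4-byte header, and the length computed from the SMB fields may only disagree with it via a 64K byte-count wrap on large reads or via a server padding the frame by at most 512 extra bytes. A length-only sketch of those rules (check_lengths() is an illustrative name; -1 stands in for -EIO):

#include <stdio.h>

/* Length-only version of the checks above; no header parsing. */
static int check_lengths(unsigned int total_read, unsigned int rfclen,
			 unsigned int clc_len)
{
	/* the bytes actually read must match the RFC1001 length field */
	if (4 + rfclen != total_read)
		return -1;
	/* exact agreement with the length calculated from the SMB fields */
	if (4 + rfclen == clc_len)
		return 0;
	/* large reads may legitimately wrap the 16-bit byte count */
	if (rfclen > 64 * 1024 && rfclen > clc_len &&
	    ((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
		return 0;
	/* frame shorter than the SMB claims: always fatal */
	if (4 + rfclen < clc_len)
		return -1;
	/* some servers pad; tolerate up to 512 extra bytes */
	if (rfclen > clc_len + 512)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", check_lengths(100, 96, 100));	/* exact match ->  0 */
	printf("%d\n", check_lengths(100, 96, 700));	/* short frame -> -1 */
	return 0;
}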
...@@ -676,3 +675,18 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock) ...@@ -676,3 +675,18 @@ void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
cinode->clientCanCacheRead = false; cinode->clientCanCacheRead = false;
} }
} }
bool
backup_cred(struct cifs_sb_info *cifs_sb)
{
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
if (cifs_sb->mnt_backupuid == current_fsuid())
return true;
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
if (in_group_p(cifs_sb->mnt_backupgid))
return true;
}
return false;
}
...@@ -124,7 +124,9 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) ...@@ -124,7 +124,9 @@ static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB)
/* that we use in next few lines */ /* that we use in next few lines */
/* Note that header is initialized to zero in header_assemble */ /* Note that header is initialized to zero in header_assemble */
pSMB->req.AndXCommand = 0xFF; pSMB->req.AndXCommand = 0xFF;
pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf); pSMB->req.MaxBufferSize = cpu_to_le16(min_t(u32,
CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4,
USHRT_MAX));
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq); pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
pSMB->req.VcNumber = get_next_vcnum(ses); pSMB->req.VcNumber = get_next_vcnum(ses);
......
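The MaxBufferSize change above is a truncation fix: the field in SESSION_SETUP_ANDX is a 16-bit little-endian value, so the advertised buffer size has to be clamped to USHRT_MAX before the narrowing conversion. A tiny stand-alone illustration (the constants are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_MAX_BUF		(16 * 1024)	/* illustrative only */
#define EXAMPLE_HDR_SIZE	0x58		/* illustrative only */

int main(void)
{
	uint32_t want = EXAMPLE_MAX_BUF + EXAMPLE_HDR_SIZE - 4;
	/* The wire field is only 16 bits wide: clamp, then narrow. */
	uint16_t advertised = want > UINT16_MAX ? (uint16_t)UINT16_MAX
						: (uint16_t)want;

	printf("advertising %u bytes\n", advertised);
	return 0;
}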
...@@ -265,91 +265,6 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16)
return rc; return rc;
} }
#if 0 /* currently unused */
/* Does both the NT and LM owfs of a user's password */
static void
nt_lm_owf_gen(char *pwd, unsigned char nt_p16[16], unsigned char p16[16])
{
char passwd[514];
memset(passwd, '\0', 514);
if (strlen(pwd) < 513)
strcpy(passwd, pwd);
else
memcpy(passwd, pwd, 512);
/* Calculate the MD4 hash (NT compatible) of the password */
memset(nt_p16, '\0', 16);
E_md4hash(passwd, nt_p16);
/* Mangle the passwords into Lanman format */
passwd[14] = '\0';
/* strupper(passwd); */
/* Calculate the SMB (lanman) hash functions of the password */
memset(p16, '\0', 16);
E_P16((unsigned char *) passwd, (unsigned char *) p16);
/* clear out local copy of user's password (just being paranoid). */
memset(passwd, '\0', sizeof(passwd));
}
#endif
/* Does the NTLMv2 owfs of a user's password */
#if 0 /* function not needed yet - but will be soon */
static void
ntv2_owf_gen(const unsigned char owf[16], const char *user_n,
const char *domain_n, unsigned char kr_buf[16],
const struct nls_table *nls_codepage)
{
wchar_t *user_u;
wchar_t *dom_u;
int user_l, domain_l;
struct HMACMD5Context ctx;
/* might as well do one alloc to hold both (user_u and dom_u) */
user_u = kmalloc(2048 * sizeof(wchar_t), GFP_KERNEL);
if (user_u == NULL)
return;
dom_u = user_u + 1024;
/* push_ucs2(NULL, user_u, user_n, (user_l+1)*2,
STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER);
push_ucs2(NULL, dom_u, domain_n, (domain_l+1)*2,
STR_UNICODE|STR_NOALIGN|STR_TERMINATE|STR_UPPER); */
/* BB user and domain may need to be uppercased */
user_l = cifs_strtoUCS(user_u, user_n, 511, nls_codepage);
domain_l = cifs_strtoUCS(dom_u, domain_n, 511, nls_codepage);
user_l++; /* trailing null */
domain_l++;
hmac_md5_init_limK_to_64(owf, 16, &ctx);
hmac_md5_update((const unsigned char *) user_u, user_l * 2, &ctx);
hmac_md5_update((const unsigned char *) dom_u, domain_l * 2, &ctx);
hmac_md5_final(kr_buf, &ctx);
kfree(user_u);
}
#endif
/* Does the des encryption from the FIRST 8 BYTES of the NT or LM MD4 hash. */
#if 0 /* currently unused */
static void
NTLMSSPOWFencrypt(unsigned char passwd[8],
unsigned char *ntlmchalresp, unsigned char p24[24])
{
unsigned char p21[21];
memset(p21, '\0', 21);
memcpy(p21, passwd, 8);
memset(p21 + 8, 0xbd, 8);
E_P24(p21, ntlmchalresp, p24);
}
#endif
/* Does the NT MD4 hash then des encryption. */ /* Does the NT MD4 hash then des encryption. */
int int
SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24) SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
...@@ -369,39 +284,3 @@ SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24)
rc = E_P24(p21, c8, p24); rc = E_P24(p21, c8, p24);
return rc; return rc;
} }
/* Does the md5 encryption from the NT hash for NTLMv2. */
/* These routines will be needed later */
#if 0
static void
SMBOWFencrypt_ntv2(const unsigned char kr[16],
const struct data_blob *srv_chal,
const struct data_blob *cli_chal, unsigned char resp_buf[16])
{
struct HMACMD5Context ctx;
hmac_md5_init_limK_to_64(kr, 16, &ctx);
hmac_md5_update(srv_chal->data, srv_chal->length, &ctx);
hmac_md5_update(cli_chal->data, cli_chal->length, &ctx);
hmac_md5_final(resp_buf, &ctx);
}
static void
SMBsesskeygen_ntv2(const unsigned char kr[16],
const unsigned char *nt_resp, __u8 sess_key[16])
{
struct HMACMD5Context ctx;
hmac_md5_init_limK_to_64(kr, 16, &ctx);
hmac_md5_update(nt_resp, 16, &ctx);
hmac_md5_final((unsigned char *) sess_key, &ctx);
}
static void
SMBsesskeygen_ntv1(const unsigned char kr[16],
const unsigned char *nt_resp, __u8 sess_key[16])
{
mdfour((unsigned char *) sess_key, (unsigned char *) kr, 16);
}
#endif
...@@ -26,6 +26,7 @@
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/net.h> #include <linux/net.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/freezer.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/processor.h> #include <asm/processor.h>
#include <linux/mempool.h> #include <linux/mempool.h>
...@@ -324,7 +325,7 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
{ {
int error; int error;
error = wait_event_killable(server->response_q, error = wait_event_freezekillable(server->response_q,
midQ->midState != MID_REQUEST_SUBMITTED); midQ->midState != MID_REQUEST_SUBMITTED);
if (error < 0) if (error < 0)
return -ERESTARTSYS; return -ERESTARTSYS;
...@@ -339,8 +340,8 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ)
*/ */
int int
cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
unsigned int nvec, mid_callback_t *callback, void *cbdata, unsigned int nvec, mid_receive_t *receive,
bool ignore_pend) mid_callback_t *callback, void *cbdata, bool ignore_pend)
{ {
int rc; int rc;
struct mid_q_entry *mid; struct mid_q_entry *mid;
...@@ -374,6 +375,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
goto out_err; goto out_err;
} }
mid->receive = receive;
mid->callback = callback; mid->callback = callback;
mid->callback_data = cbdata; mid->callback_data = cbdata;
mid->midState = MID_REQUEST_SUBMITTED; mid->midState = MID_REQUEST_SUBMITTED;
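With the new mid_receive_t hook wired in above, an asynchronous caller can take over reading the rest of the frame itself (for example straight into its own pages) instead of having the demultiplex thread buffer it. A hedged sketch of the registration pattern; the two handlers are invented names, and the callback signatures are assumed from the usage shown here:

/* Sketch only: my_read_receive()/my_read_done() are hypothetical. */
static int my_read_receive(struct TCP_Server_Info *server,
			   struct mid_q_entry *mid)
{
	/* Read the remainder of the response off the socket here,
	 * e.g. directly into the requester's pages; return 0 on success. */
	return 0;
}

static void my_read_done(struct mid_q_entry *mid)
{
	/* Runs once the mid completes (or fails); hand the result back
	 * to whoever queued the request. */
}

static int queue_my_read(struct TCP_Server_Info *server, struct kvec *iov,
			 unsigned int nvec, void *ctx)
{
	/* A NULL receive hook keeps the old behaviour of buffering the
	 * whole frame in the demultiplex thread. */
	return cifs_call_async(server, iov, nvec, my_read_receive,
			       my_read_done, ctx, false);
}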
...@@ -496,13 +498,18 @@ int
cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
bool log_error) bool log_error)
{ {
dump_smb(mid->resp_buf, unsigned int len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length)));
dump_smb(mid->resp_buf, min_t(u32, 92, len));
/* convert the length into a more usable form */ /* convert the length into a more usable form */
if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
struct kvec iov;
iov.iov_base = mid->resp_buf;
iov.iov_len = len;
/* FIXME: add code to kill session */ /* FIXME: add code to kill session */
if (cifs_verify_signature(mid->resp_buf, server, if (cifs_verify_signature(&iov, 1, server,
mid->sequence_number + 1) != 0) mid->sequence_number + 1) != 0)
cERROR(1, "Unexpected SMB signature"); cERROR(1, "Unexpected SMB signature");
} }
......
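cifs_verify_signature() now takes an iovec array instead of a flat buffer, so a response whose header and payload were received into different buffers can still be checksummed. A minimal sketch of how a caller with a split response might build the vector (the surrounding names are invented):

/* Sketch: verifying the signature over a response held in two buffers. */
static int check_split_signature(struct TCP_Server_Info *server,
				 void *hdr, size_t hdr_len,
				 void *payload, size_t payload_len,
				 __u32 expected_seq)
{
	struct kvec iov[2];

	iov[0].iov_base = hdr;		/* SMB header and parameter words */
	iov[0].iov_len = hdr_len;
	iov[1].iov_base = payload;	/* data received separately */
	iov[1].iov_len = payload_len;

	/* Signing over multiple buffers is the point of the new interface. */
	return cifs_verify_signature(iov, 2, server, expected_seq);
}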
...@@ -173,7 +173,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
#ifdef CONFIG_CIFS_ACL #ifdef CONFIG_CIFS_ACL
memcpy(pacl, ea_value, value_size); memcpy(pacl, ea_value, value_size);
rc = set_cifs_acl(pacl, value_size, rc = set_cifs_acl(pacl, value_size,
direntry->d_inode, full_path); direntry->d_inode, full_path, CIFS_ACL_DACL);
if (rc == 0) /* force revalidate of the inode */ if (rc == 0) /* force revalidate of the inode */
CIFS_I(direntry->d_inode)->time = 0; CIFS_I(direntry->d_inode)->time = 0;
kfree(pacl); kfree(pacl);
......
...@@ -135,10 +135,25 @@ static inline void set_freezable_with_signal(void)
} }
/* /*
* Freezer-friendly wrappers around wait_event_interruptible() and * Freezer-friendly wrappers around wait_event_interruptible(),
* wait_event_interruptible_timeout(), originally defined in <linux/wait.h> * wait_event_killable() and wait_event_interruptible_timeout(), originally
* defined in <linux/wait.h>
*/ */
#define wait_event_freezekillable(wq, condition) \
({ \
int __retval; \
do { \
__retval = wait_event_killable(wq, \
(condition) || freezing(current)); \
if (__retval && !freezing(current)) \
break; \
else if (!(condition)) \
__retval = -ERESTARTSYS; \
} while (try_to_freeze()); \
__retval; \
})
#define wait_event_freezable(wq, condition) \ #define wait_event_freezable(wq, condition) \
({ \ ({ \
int __retval; \ int __retval; \
...@@ -190,6 +205,9 @@ static inline void set_freezable_with_signal(void) {}
#define wait_event_freezable_timeout(wq, condition, timeout) \ #define wait_event_freezable_timeout(wq, condition, timeout) \
wait_event_interruptible_timeout(wq, condition, timeout) wait_event_interruptible_timeout(wq, condition, timeout)
#define wait_event_freezekillable(wq, condition) \
wait_event_killable(wq, condition)
#endif /* !CONFIG_FREEZER */ #endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */ #endif /* FREEZER_H_INCLUDED */
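wait_event_freezekillable() behaves like wait_event_killable() but also wakes when the freezer kicks in, calls try_to_freeze(), and then re-waits, so a task blocked on a server reply no longer blocks suspend. A hedged usage sketch, with an invented wait queue and condition:

/* Sketch: a thread waiting for a reply without holding up suspend.
 * my_conn, reply_waitq and reply_ready are invented for the example. */
static int my_wait_for_reply(struct my_conn *conn)
{
	int rc;

	/* Returns 0 once reply_ready becomes true, -ERESTARTSYS on a
	 * fatal signal; being frozen and thawed just restarts the wait. */
	rc = wait_event_freezekillable(conn->reply_waitq, conn->reply_ready);
	if (rc < 0)
		return -ERESTARTSYS;

	return 0;
}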