Commit bd61e0a9 authored by Jeff Layton, committed by Jeff Layton

locks: convert posix locks to file_lock_context

Signed-off-by: Jeff Layton <jlayton@primarydata.com>
Acked-by: Christoph Hellwig <hch@lst.de>
parent 5263e31e
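
This patch moves POSIX (fcntl) byte-range locks off the old singly linked inode->i_flock list, where they sat interleaved with other lock types and had to be filtered by fl_flags, onto a dedicated flc_posix list in the inode's file_lock_context (alongside the flc_flock list introduced by the parent commit). Readers now test inode->i_flctx for NULL and walk the list with list_for_each_entry() under inode->i_lock. As a rough, hedged illustration of the reader-side pattern shared by the converted call sites below (count_posix_locks() is a hypothetical helper, not something this patch adds):

    /*
     * Illustrative sketch only -- not part of the patch. Mirrors the
     * reader-side pattern used by the converted call sites below.
     */
    static int count_posix_locks(struct inode *inode)
    {
        struct file_lock_context *ctx = inode->i_flctx;
        struct file_lock *fl;
        int count = 0;

        /* No context allocated yet means no locks at all. */
        if (!ctx || list_empty_careful(&ctx->flc_posix))
            return 0;

        spin_lock(&inode->i_lock);
        list_for_each_entry(fl, &ctx->flc_posix, fl_list)
            count++;    /* every entry on flc_posix is a POSIX lock */
        spin_unlock(&inode->i_lock);
        return count;
    }
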
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -253,18 +253,15 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
     *fcntl_count = 0;
     *flock_count = 0;
 
-    spin_lock(&inode->i_lock);
-    for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-        if (lock->fl_flags & FL_POSIX)
-            ++(*fcntl_count);
-    }
-
     ctx = inode->i_flctx;
     if (ctx) {
+        spin_lock(&inode->i_lock);
+        list_for_each_entry(lock, &ctx->flc_posix, fl_list)
+            ++(*fcntl_count);
         list_for_each_entry(lock, &ctx->flc_flock, fl_list)
             ++(*flock_count);
+        spin_unlock(&inode->i_lock);
     }
-    spin_unlock(&inode->i_lock);
     dout("counted %d flock locks and %d fcntl locks",
          *flock_count, *fcntl_count);
 }
@@ -279,7 +276,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
                 int num_fcntl_locks, int num_flock_locks)
 {
     struct file_lock *lock;
-    struct file_lock_context *ctx;
+    struct file_lock_context *ctx = inode->i_flctx;
     int err = 0;
     int seen_fcntl = 0;
     int seen_flock = 0;
@@ -288,34 +285,31 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
     dout("encoding %d flock and %d fcntl locks", num_flock_locks,
          num_fcntl_locks);
 
+    if (!ctx)
+        return 0;
+
     spin_lock(&inode->i_lock);
-    for (lock = inode->i_flock; lock != NULL; lock = lock->fl_next) {
-        if (lock->fl_flags & FL_POSIX) {
-            ++seen_fcntl;
-            if (seen_fcntl > num_fcntl_locks) {
-                err = -ENOSPC;
-                goto fail;
-            }
-            err = lock_to_ceph_filelock(lock, &flocks[l]);
-            if (err)
-                goto fail;
-            ++l;
+    list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+        ++seen_fcntl;
+        if (seen_fcntl > num_fcntl_locks) {
+            err = -ENOSPC;
+            goto fail;
         }
+        err = lock_to_ceph_filelock(lock, &flocks[l]);
+        if (err)
+            goto fail;
+        ++l;
     }
-
-    ctx = inode->i_flctx;
-    if (ctx) {
-        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
-            ++seen_flock;
-            if (seen_flock > num_flock_locks) {
-                err = -ENOSPC;
-                goto fail;
-            }
-            err = lock_to_ceph_filelock(lock, &flocks[l]);
-            if (err)
-                goto fail;
-            ++l;
+    list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
+        ++seen_flock;
+        if (seen_flock > num_flock_locks) {
+            err = -ENOSPC;
+            goto fail;
         }
+        err = lock_to_ceph_filelock(lock, &flocks[l]);
+        if (err)
+            goto fail;
+        ++l;
     }
 fail:
     spin_unlock(&inode->i_lock);
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1109,11 +1109,6 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
     return rc;
 }
 
-/* copied from fs/locks.c with a name change */
-#define cifs_for_each_lock(inode, lockp) \
-    for (lockp = &inode->i_flock; *lockp != NULL; \
-         lockp = &(*lockp)->fl_next)
-
 struct lock_to_push {
     struct list_head llist;
     __u64 offset;
@@ -1128,8 +1123,9 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 {
     struct inode *inode = cfile->dentry->d_inode;
     struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
-    struct file_lock *flock, **before;
-    unsigned int count = 0, i = 0;
+    struct file_lock *flock;
+    struct file_lock_context *flctx = inode->i_flctx;
+    unsigned int count = 0, i;
     int rc = 0, xid, type;
     struct list_head locks_to_send, *el;
     struct lock_to_push *lck, *tmp;
@@ -1137,10 +1133,12 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 
     xid = get_xid();
 
+    if (!flctx)
+        goto out;
+
     spin_lock(&inode->i_lock);
-    cifs_for_each_lock(inode, before) {
-        if ((*before)->fl_flags & FL_POSIX)
-            count++;
+    list_for_each(el, &flctx->flc_posix) {
+        count++;
     }
     spin_unlock(&inode->i_lock);
 
@@ -1151,7 +1149,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
      * added to the list while we are holding cinode->lock_sem that
      * protects locking operations of this inode.
      */
-    for (; i < count; i++) {
+    for (i = 0; i < count; i++) {
         lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
         if (!lck) {
             rc = -ENOMEM;
@@ -1162,10 +1160,7 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
 
     el = locks_to_send.next;
     spin_lock(&inode->i_lock);
-    cifs_for_each_lock(inode, before) {
-        flock = *before;
-        if ((flock->fl_flags & FL_POSIX) == 0)
-            continue;
+    list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
         if (el == &locks_to_send) {
             /*
              * The list ended. We don't have enough allocated
@@ -1185,7 +1180,6 @@ cifs_push_posix_locks(struct cifsFileInfo *cfile)
         lck->length = length;
         lck->type = type;
         lck->offset = flock->fl_start;
-        el = el->next;
     }
     spin_unlock(&inode->i_lock);
--- a/fs/lockd/svcsubs.c
+++ b/fs/lockd/svcsubs.c
@@ -164,12 +164,15 @@ nlm_traverse_locks(struct nlm_host *host, struct nlm_file *file,
 {
     struct inode *inode = nlmsvc_file_inode(file);
     struct file_lock *fl;
+    struct file_lock_context *flctx = inode->i_flctx;
     struct nlm_host *lockhost;
 
+    if (!flctx || list_empty_careful(&flctx->flc_posix))
+        return 0;
 again:
     file->f_locks = 0;
     spin_lock(&inode->i_lock);
-    for (fl = inode->i_flock; fl; fl = fl->fl_next) {
+    list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
         if (fl->fl_lmops != &nlmsvc_lock_operations)
             continue;
@@ -223,18 +226,21 @@ nlm_file_inuse(struct nlm_file *file)
 {
     struct inode *inode = nlmsvc_file_inode(file);
     struct file_lock *fl;
+    struct file_lock_context *flctx = inode->i_flctx;
 
     if (file->f_count || !list_empty(&file->f_blocks) || file->f_shares)
         return 1;
 
-    spin_lock(&inode->i_lock);
-    for (fl = inode->i_flock; fl; fl = fl->fl_next) {
-        if (fl->fl_lmops == &nlmsvc_lock_operations) {
-            spin_unlock(&inode->i_lock);
-            return 1;
+    if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+        spin_lock(&inode->i_lock);
+        list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+            if (fl->fl_lmops == &nlmsvc_lock_operations) {
+                spin_unlock(&inode->i_lock);
+                return 1;
+            }
         }
+        spin_unlock(&inode->i_lock);
     }
-    spin_unlock(&inode->i_lock);
     file->f_locks = 0;
     return 0;
 }
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -157,9 +157,6 @@ static int target_leasetype(struct file_lock *fl)
 int leases_enable = 1;
 int lease_break_time = 45;
 
-#define for_each_lock(inode, lockp) \
-    for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
-
 /*
  * The global file_lock_list is only used for displaying /proc/locks, so we
  * keep a list on each CPU, with each list protected by its own spinlock via
@@ -218,6 +215,7 @@ locks_get_lock_context(struct inode *inode)
         goto out;
 
     INIT_LIST_HEAD(&new->flc_flock);
+    INIT_LIST_HEAD(&new->flc_posix);
 
     /*
      * Assign the pointer if it's not already assigned. If it is, then
@@ -241,6 +239,7 @@ locks_free_lock_context(struct file_lock_context *ctx)
 {
     if (ctx) {
         WARN_ON_ONCE(!list_empty(&ctx->flc_flock));
+        WARN_ON_ONCE(!list_empty(&ctx->flc_posix));
         kmem_cache_free(flctx_cache, ctx);
     }
 }
@@ -809,21 +808,26 @@ void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
     struct file_lock *cfl;
+    struct file_lock_context *ctx;
     struct inode *inode = file_inode(filp);
 
+    ctx = inode->i_flctx;
+    if (!ctx || list_empty_careful(&ctx->flc_posix)) {
+        fl->fl_type = F_UNLCK;
+        return;
+    }
+
     spin_lock(&inode->i_lock);
-    for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
-        if (!IS_POSIX(cfl))
-            continue;
-        if (posix_locks_conflict(fl, cfl))
-            break;
+    list_for_each_entry(cfl, &ctx->flc_posix, fl_list) {
+        if (posix_locks_conflict(fl, cfl)) {
+            locks_copy_conflock(fl, cfl);
+            if (cfl->fl_nspid)
+                fl->fl_pid = pid_vnr(cfl->fl_nspid);
+            goto out;
+        }
     }
-    if (cfl) {
-        locks_copy_conflock(fl, cfl);
-        if (cfl->fl_nspid)
-            fl->fl_pid = pid_vnr(cfl->fl_nspid);
-    } else
-        fl->fl_type = F_UNLCK;
+    fl->fl_type = F_UNLCK;
+out:
     spin_unlock(&inode->i_lock);
     return;
 }
@@ -983,16 +987,20 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
 static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 {
-    struct file_lock *fl;
+    struct file_lock *fl, *tmp;
     struct file_lock *new_fl = NULL;
     struct file_lock *new_fl2 = NULL;
     struct file_lock *left = NULL;
     struct file_lock *right = NULL;
-    struct file_lock **before;
+    struct file_lock_context *ctx;
     int error;
     bool added = false;
     LIST_HEAD(dispose);
 
+    ctx = locks_get_lock_context(inode);
+    if (!ctx)
+        return -ENOMEM;
+
     /*
      * We may need two file_lock structures for this operation,
      * so we get them in advance to avoid races.
@@ -1013,8 +1021,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
      * blocker's list of waiters and the global blocked_hash.
      */
     if (request->fl_type != F_UNLCK) {
-        for_each_lock(inode, before) {
-            fl = *before;
+        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
             if (!IS_POSIX(fl))
                 continue;
             if (!posix_locks_conflict(request, fl))
@@ -1044,29 +1051,25 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
     if (request->fl_flags & FL_ACCESS)
         goto out;
 
-    /*
-     * Find the first old lock with the same owner as the new lock.
-     */
-
-    before = &inode->i_flock;
-
-    /* First skip locks owned by other processes. */
-    while ((fl = *before) && (!IS_POSIX(fl) ||
-                  !posix_same_owner(request, fl))) {
-        before = &fl->fl_next;
+    /* Find the first old lock with the same owner as the new lock */
+    list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
+        if (posix_same_owner(request, fl))
+            break;
     }
 
     /* Process locks with this owner. */
-    while ((fl = *before) && posix_same_owner(request, fl)) {
-        /* Detect adjacent or overlapping regions (if same lock type)
-         */
+    list_for_each_entry_safe_from(fl, tmp, &ctx->flc_posix, fl_list) {
+        if (!posix_same_owner(request, fl))
+            break;
+        /* Detect adjacent or overlapping regions (if same lock type) */
         if (request->fl_type == fl->fl_type) {
             /* In all comparisons of start vs end, use
              * "start - 1" rather than "end + 1". If end
              * is OFFSET_MAX, end + 1 will become negative.
              */
             if (fl->fl_end < request->fl_start - 1)
-                goto next_lock;
+                continue;
             /* If the next lock in the list has entirely bigger
              * addresses than the new one, insert the lock here.
              */
@@ -1087,18 +1090,17 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
             else
                 request->fl_end = fl->fl_end;
             if (added) {
-                locks_delete_lock(before, &dispose);
+                locks_delete_lock_ctx(fl, &dispose);
                 continue;
             }
             request = fl;
             added = true;
-        }
-        else {
+        } else {
             /* Processing for different lock types is a bit
              * more complex.
              */
             if (fl->fl_end < request->fl_start)
-                goto next_lock;
+                continue;
             if (fl->fl_start > request->fl_end)
                 break;
             if (request->fl_type == F_UNLCK)
@@ -1117,7 +1119,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
              * one (This may happen several times).
              */
             if (added) {
-                locks_delete_lock(before, &dispose);
+                locks_delete_lock_ctx(fl, &dispose);
                 continue;
             }
             /*
@@ -1133,15 +1135,11 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
             locks_copy_lock(new_fl, request);
             request = new_fl;
             new_fl = NULL;
-            locks_delete_lock(before, &dispose);
-            locks_insert_lock(before, request);
+            locks_insert_lock_ctx(request, &fl->fl_list);
+            locks_delete_lock_ctx(fl, &dispose);
             added = true;
         }
     }
-    /* Go on to next lock.
-     */
-    next_lock:
-    before = &fl->fl_next;
     }
 
     /*
@@ -1166,7 +1164,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
             goto out;
         }
         locks_copy_lock(new_fl, request);
-        locks_insert_lock(before, new_fl);
+        locks_insert_lock_ctx(new_fl, &fl->fl_list);
         new_fl = NULL;
     }
     if (right) {
@@ -1177,7 +1175,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request, str
             left = new_fl2;
             new_fl2 = NULL;
             locks_copy_lock(left, right);
-            locks_insert_lock(before, left);
+            locks_insert_lock_ctx(left, &fl->fl_list);
         }
         right->fl_start = request->fl_end + 1;
         locks_wake_up_blocks(right);
@@ -1257,22 +1255,29 @@ EXPORT_SYMBOL(posix_lock_file_wait);
  */
 int locks_mandatory_locked(struct file *file)
 {
+    int ret;
     struct inode *inode = file_inode(file);
+    struct file_lock_context *ctx;
    struct file_lock *fl;
 
+    ctx = inode->i_flctx;
+    if (!ctx || list_empty_careful(&ctx->flc_posix))
+        return 0;
+
     /*
      * Search the lock list for this inode for any POSIX locks.
      */
     spin_lock(&inode->i_lock);
-    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-        if (!IS_POSIX(fl))
-            continue;
+    ret = 0;
+    list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
         if (fl->fl_owner != current->files &&
-            fl->fl_owner != file)
+            fl->fl_owner != file) {
+            ret = -EAGAIN;
             break;
+        }
     }
     spin_unlock(&inode->i_lock);
-    return fl ? -EAGAIN : 0;
+    return ret;
 }
 
 /**
@@ -2389,13 +2394,14 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd,
 void locks_remove_posix(struct file *filp, fl_owner_t owner)
 {
     struct file_lock lock;
+    struct file_lock_context *ctx = file_inode(filp)->i_flctx;
 
     /*
      * If there are no locks held on this file, we don't need to call
      * posix_lock_file(). Another process could be setting a lock on this
      * file at the same time, but we wouldn't remove that lock anyway.
      */
-    if (!file_inode(filp)->i_flock)
+    if (!ctx || list_empty(&ctx->flc_posix))
         return;
 
     lock.fl_type = F_UNLCK;
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -85,17 +85,17 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
 {
     struct inode *inode = state->inode;
     struct file_lock *fl;
-    struct file_lock_context *flctx;
+    struct file_lock_context *flctx = inode->i_flctx;
+    struct list_head *list;
     int status = 0;
 
-    if (inode->i_flock == NULL && inode->i_flctx == NULL)
+    if (flctx == NULL)
         goto out;
 
-    /* Protect inode->i_flock using the i_lock */
+    list = &flctx->flc_posix;
     spin_lock(&inode->i_lock);
-    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-        if (!(fl->fl_flags & (FL_POSIX)))
-            continue;
+restart:
+    list_for_each_entry(fl, list, fl_list) {
         if (nfs_file_open_context(fl->fl_file) != ctx)
             continue;
         spin_unlock(&inode->i_lock);
@@ -104,19 +104,9 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
             goto out;
         spin_lock(&inode->i_lock);
     }
-
-    flctx = inode->i_flctx;
-    if (flctx) {
-        list_for_each_entry(fl, &flctx->flc_flock, fl_list) {
-            if (nfs_file_open_context(fl->fl_file) != ctx)
-                continue;
-            spin_unlock(&inode->i_lock);
-            status = nfs4_lock_delegation_recall(fl, state,
-                    stateid);
-            if (status < 0)
-                goto out;
-            spin_lock(&inode->i_lock);
-        }
+    if (list == &flctx->flc_posix) {
+        list = &flctx->flc_flock;
+        goto restart;
     }
     spin_unlock(&inode->i_lock);
 out:
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1367,53 +1367,18 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
     struct file_lock *fl;
     int status = 0;
     struct file_lock_context *flctx = inode->i_flctx;
+    struct list_head *list;
 
-    if (inode->i_flock == NULL && flctx == NULL)
+    if (flctx == NULL)
         return 0;
 
+    list = &flctx->flc_posix;
+
     /* Guard against delegation returns and new lock/unlock calls */
     down_write(&nfsi->rwsem);
-    /* Protect inode->i_flock using the BKL */
     spin_lock(&inode->i_lock);
-    for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
-        if (!(fl->fl_flags & FL_POSIX))
-            continue;
-        if (nfs_file_open_context(fl->fl_file)->state != state)
-            continue;
-        spin_unlock(&inode->i_lock);
-        status = ops->recover_lock(state, fl);
-        switch (status) {
-        case 0:
-            break;
-        case -ESTALE:
-        case -NFS4ERR_ADMIN_REVOKED:
-        case -NFS4ERR_STALE_STATEID:
-        case -NFS4ERR_BAD_STATEID:
-        case -NFS4ERR_EXPIRED:
-        case -NFS4ERR_NO_GRACE:
-        case -NFS4ERR_STALE_CLIENTID:
-        case -NFS4ERR_BADSESSION:
-        case -NFS4ERR_BADSLOT:
-        case -NFS4ERR_BAD_HIGH_SLOT:
-        case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
-            goto out;
-        default:
-            printk(KERN_ERR "NFS: %s: unhandled error %d\n",
-                    __func__, status);
-        case -ENOMEM:
-        case -NFS4ERR_DENIED:
-        case -NFS4ERR_RECLAIM_BAD:
-        case -NFS4ERR_RECLAIM_CONFLICT:
-            /* kill_proc(fl->fl_pid, SIGLOST, 1); */
-            status = 0;
-        }
-        spin_lock(&inode->i_lock);
-    }
-    if (!flctx)
-        goto out_unlock;
-    list_for_each_entry(fl, &flctx->flc_flock, fl_list) {
+restart:
+    list_for_each_entry(fl, list, fl_list) {
         if (nfs_file_open_context(fl->fl_file)->state != state)
             continue;
         spin_unlock(&inode->i_lock);
@@ -1445,7 +1410,10 @@ static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_
         }
         spin_lock(&inode->i_lock);
     }
-out_unlock:
+    if (list == &flctx->flc_posix) {
+        list = &flctx->flc_flock;
+        goto restart;
+    }
     spin_unlock(&inode->i_lock);
 out:
     up_write(&nfsi->rwsem);
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -831,12 +831,10 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
     if (prev) {
         if (!nfs_match_open_context(req->wb_context, prev->wb_context))
             return false;
-        if (req->wb_context->dentry->d_inode->i_flock != NULL &&
-            !nfs_match_lock_context(req->wb_lock_context,
-                        prev->wb_lock_context))
-            return false;
         flctx = req->wb_context->dentry->d_inode->i_flctx;
-        if (flctx != NULL && !list_empty_careful(&flctx->flc_flock) &&
+        if (flctx != NULL &&
+            !(list_empty_careful(&flctx->flc_posix) &&
+              list_empty_careful(&flctx->flc_flock)) &&
             !nfs_match_lock_context(req->wb_lock_context,
                         prev->wb_lock_context))
             return false;
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1091,6 +1091,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
 {
     struct nfs_open_context *ctx = nfs_file_open_context(file);
     struct nfs_lock_context *l_ctx;
+    struct file_lock_context *flctx = file_inode(file)->i_flctx;
     struct nfs_page *req;
     int do_flush, status;
     /*
@@ -1109,12 +1110,9 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         do_flush = req->wb_page != page || req->wb_context != ctx;
         /* for now, flush if more than 1 request in page_group */
         do_flush |= req->wb_this_page != req;
-        if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) {
-            do_flush |= l_ctx->lockowner.l_owner != current->files
-                || l_ctx->lockowner.l_pid != current->tgid;
-        }
-        if (l_ctx && ctx->dentry->d_inode->i_flctx &&
-            !list_empty_careful(&ctx->dentry->d_inode->i_flctx->flc_flock)) {
+        if (l_ctx && flctx &&
+            !(list_empty_careful(&flctx->flc_posix) &&
+              list_empty_careful(&flctx->flc_flock))) {
             do_flush |= l_ctx->lockowner.l_owner != current->files
                 || l_ctx->lockowner.l_pid != current->tgid;
         }
@@ -1202,26 +1200,24 @@ static int nfs_can_extend_write(struct file *file, struct page *page, struct ino
         return 0;
     if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
         return 1;
-    if (!inode->i_flock && !flctx)
+    if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
+               list_empty_careful(&flctx->flc_posix)))
         return 0;
 
     /* Check to see if there are whole file write locks */
-    spin_lock(&inode->i_lock);
     ret = 0;
-
-    fl = inode->i_flock;
-    if (fl && is_whole_file_wrlock(fl)) {
-        ret = 1;
-        goto out;
-    }
-
-    if (!list_empty(&flctx->flc_flock)) {
+    spin_lock(&inode->i_lock);
+    if (!list_empty(&flctx->flc_posix)) {
+        fl = list_first_entry(&flctx->flc_posix, struct file_lock,
+                    fl_list);
+        if (is_whole_file_wrlock(fl))
+            ret = 1;
+    } else if (!list_empty(&flctx->flc_flock)) {
         fl = list_first_entry(&flctx->flc_flock, struct file_lock,
                     fl_list);
         if (fl->fl_type == F_WRLCK)
             ret = 1;
     }
-out:
     spin_unlock(&inode->i_lock);
     return ret;
 }
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -5556,10 +5556,11 @@ nfsd4_locku(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 static bool
 check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
-    struct file_lock **flpp;
+    struct file_lock *fl;
     int status = false;
     struct file *filp = find_any_file(fp);
     struct inode *inode;
+    struct file_lock_context *flctx;
 
     if (!filp) {
         /* Any valid lock stateid should have some sort of access */
@@ -5568,15 +5569,18 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
     }
 
     inode = file_inode(filp);
+    flctx = inode->i_flctx;
 
-    spin_lock(&inode->i_lock);
-    for (flpp = &inode->i_flock; *flpp != NULL; flpp = &(*flpp)->fl_next) {
-        if ((*flpp)->fl_owner == (fl_owner_t)lowner) {
-            status = true;
-            break;
+    if (flctx && !list_empty_careful(&flctx->flc_posix)) {
+        spin_lock(&inode->i_lock);
+        list_for_each_entry(fl, &flctx->flc_posix, fl_list) {
+            if (fl->fl_owner == (fl_owner_t)lowner) {
+                status = true;
+                break;
+            }
         }
+        spin_unlock(&inode->i_lock);
     }
-    spin_unlock(&inode->i_lock);
     fput(filp);
     return status;
 }
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -358,7 +358,7 @@ int rw_verify_area(int read_write, struct file *file, const loff_t *ppos, size_t
             return retval;
     }
 
-    if (unlikely(inode->i_flock && mandatory_lock(inode))) {
+    if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
         retval = locks_mandatory_area(
             read_write == READ ? FLOCK_VERIFY_READ : FLOCK_VERIFY_WRITE,
             inode, file, pos, count);
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -968,6 +968,7 @@ struct file_lock {
 
 struct file_lock_context {
     struct list_head    flc_flock;
+    struct list_head    flc_posix;
 };
 
 /* The following constant reflects the upper bound of the file/locking space */
@@ -1971,7 +1972,7 @@ static inline int locks_verify_truncate(struct inode *inode,
                     struct file *filp,
                     loff_t size)
 {
-    if (inode->i_flock && mandatory_lock(inode))
+    if (inode->i_flctx && mandatory_lock(inode))
         return locks_mandatory_area(
             FLOCK_VERIFY_WRITE, inode, filp,
             size < inode->i_size ? size : inode->i_size,
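
One behavioral detail worth noting from the __posix_lock_file() hunk above: the context is now allocated lazily by locks_get_lock_context() on the first lock request, so a lock attempt can fail with -ENOMEM before the lists are ever consulted. A hedged sketch of that setter-side shape (check_posix_conflicts() is a hypothetical name, not part of the patch):

    /* Illustrative sketch only; mirrors the allocate-then-walk pattern above. */
    static int check_posix_conflicts(struct inode *inode, struct file_lock *request)
    {
        struct file_lock_context *ctx;
        struct file_lock *fl;
        int error = 0;

        ctx = locks_get_lock_context(inode);    /* allocates i_flctx on demand */
        if (!ctx)
            return -ENOMEM;                     /* new failure mode for lock requests */

        spin_lock(&inode->i_lock);
        list_for_each_entry(fl, &ctx->flc_posix, fl_list) {
            /* no FL_POSIX flag test needed; the list holds only POSIX locks */
            if (posix_locks_conflict(request, fl)) {
                error = -EAGAIN;
                break;
            }
        }
        spin_unlock(&inode->i_lock);
        return error;
    }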