Commit f8397191 authored by Linus Torvalds

Merge tag 'gfs2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes

Pull gfs2 fixes from Steven Whitehouse:
 "This patch set contains two minor docs/spelling fixes, some fixes for
  flock, a change to use GFP_NOFS to avoid recursion on a rarely used
  code path and a fix for a race relating to the glock lru"

* tag 'gfs2-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/steve/gfs2-3.0-fixes:
  GFS2: fs/gfs2/rgrp.c: kernel-doc warning fixes
  GFS2: memcontrol: Spelling s/invlidate/invalidate/
  GFS2: Allow caching of glocks for flock
  GFS2: Allow flocks to use normal glock dq rather than dq_wait
  GFS2: replace count*size kzalloc by kcalloc
  GFS2: Use GFP_NOFS when allocating glocks
  GFS2: Fix race in glock lru glock disposal
  GFS2: Only wait for demote when last holder is dequeued
parents 55ae1bd0 27ff6a0f
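
A note on the GFP_NOFS change pulled in here: gfs2_glock_get() (second hunk below) can be called on paths where GFS2 is already holding locks, and a GFP_KERNEL allocation may enter direct reclaim, which can call back into filesystem writeback and recurse into GFS2. GFP_NOFS drops __GFP_FS so reclaim skips those callbacks. A minimal sketch of the pattern, using made-up names (demo_cachep, demo_alloc_in_fs_context) rather than the actual gfs2 structures:

    /* Illustrative sketch only, not gfs2 code. */
    #include <linux/slab.h>

    static struct kmem_cache *demo_cachep;  /* hypothetical cache, created at module init */

    static void *demo_alloc_in_fs_context(void)
    {
            /*
             * GFP_KERNEL could re-enter the filesystem via reclaim;
             * GFP_NOFS allows blocking and I/O but not fs callbacks,
             * so it is safe on a path already inside the filesystem.
             */
            return kmem_cache_alloc(demo_cachep, GFP_NOFS);
    }

The same reasoning applies to the kzalloc() of the LVB buffer in that hunk; the trade-off is that GFP_NOFS allocations are somewhat more likely to fail under memory pressure, which is acceptable on what the pull message calls a rarely used code path.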
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
         int error = 0;
         state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
         mutex_lock(&fp->f_fl_mutex);
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                         goto out;
                 flock_lock_file_wait(file,
                                      &(struct file_lock){.fl_type = F_UNLCK});
-                gfs2_glock_dq_wait(fl_gh);
+                gfs2_glock_dq(fl_gh);
                 gfs2_holder_reinit(state, flags, fl_gh);
         } else {
                 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
...
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                 cachep = gfs2_glock_aspace_cachep;
         else
                 cachep = gfs2_glock_cachep;
-        gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+        gl = kmem_cache_alloc(cachep, GFP_NOFS);
         if (!gl)
                 return -ENOMEM;
         memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
         if (glops->go_flags & GLOF_LVB) {
-                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+                gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                 if (!gl->gl_lksb.sb_lvbptr) {
                         kmem_cache_free(cachep, gl);
                         return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
                 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                 list_del_init(&gl->gl_lru);
                 if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                         list_add(&gl->gl_lru, &lru_list);
                         atomic_inc(&lru_count);
                         continue;
                 }
+                if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                        spin_unlock(&gl->gl_spin);
+                        goto add_back_to_lru;
+                }
                 clear_bit(GLF_LRU, &gl->gl_flags);
-                spin_unlock(&lru_lock);
                 gl->gl_lockref.count++;
                 if (demote_ok(gl))
                         handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
                 spin_unlock(&gl->gl_spin);
-                spin_lock(&lru_lock);
+                cond_resched_lock(&lru_lock);
         }
 }
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
                 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
                 /* Test for being demotable */
-                if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                         list_move(&gl->gl_lru, &dispose);
                         atomic_dec(&lru_count);
                         freed++;
...
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
  *
- * Normally we invlidate everything, but if we are moving into
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
...
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
         new_size = old_size + RECOVER_SIZE_INC;
-        submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-        result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+        submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+        result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
         if (!submit || !result) {
                 kfree(submit);
                 kfree(result);
...
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
...