Commit bc015cb8 authored by Steven Whitehouse

GFS2: Use RCU for glock hash table

This has a number of advantages:

 - Reduces contention on the hash table lock
 - Makes the code smaller and simpler
 - Should speed up glock dumps when under load
 - Removes ref count changing in examine_bucket
 - No longer need hash chain lock in glock_put() in common case

There are some further changes which this enables and which
we may do in the future. One is to look at using SLAB_RCU,
and another is to look at using a per-cpu counter for the
per-sb glock counter, since that is touched twice in the
lifetime of each glock (but only used at umount time).
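
The bulk of the mechanism lives in the glock hash table code itself, which is not part of the hunks shown below. As a rough illustration of the pattern being adopted, here is a minimal, hypothetical sketch of an RCU lookup on a bit-locked hash list (hlist_bl): readers walk a bucket under rcu_read_lock() without taking any chain lock, and only take a reference if the object is still live. The names my_obj, my_table and my_find are invented for the example; this is not the actual glock code.

#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/rculist_bl.h>
#include <asm/atomic.h>

#define MY_HASH_SIZE 1024	/* must be a power of two for the mask below */

struct my_obj {
	struct hlist_bl_node m_list;	/* chain node, like gl_list below */
	u64 m_key;
	atomic_t m_ref;			/* 0 means "being freed, do not touch" */
	struct rcu_head m_rcu;
};

static struct hlist_bl_head my_table[MY_HASH_SIZE];

/* Lock-free lookup: no hash chain lock is taken on this path. */
static struct my_obj *my_find(u64 key)
{
	struct hlist_bl_head *head = &my_table[key & (MY_HASH_SIZE - 1)];
	struct hlist_bl_node *pos;
	struct my_obj *obj;

	rcu_read_lock();
	hlist_bl_for_each_entry_rcu(obj, pos, head, m_list) {
		/* Skip objects whose refcount already hit zero */
		if (obj->m_key == key && atomic_inc_not_zero(&obj->m_ref)) {
			rcu_read_unlock();
			return obj;
		}
	}
	rcu_read_unlock();
	return NULL;
}

Because a lookup that races with teardown simply sees a zero refcount and skips the entry, the put path only needs a bucket bit lock when it actually unhashes an object, which is the "common case" saving mentioned in the list above.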
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 2b1caf6e
@@ -118,7 +118,7 @@ struct lm_lockops {
 	int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
 	void (*lm_unmount) (struct gfs2_sbd *sdp);
 	void (*lm_withdraw) (struct gfs2_sbd *sdp);
-	void (*lm_put_lock) (struct kmem_cache *cachep, struct gfs2_glock *gl);
+	void (*lm_put_lock) (struct gfs2_glock *gl);
 	int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
 			unsigned int flags);
 	void (*lm_cancel) (struct gfs2_glock *gl);
@@ -174,7 +174,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp,
 		   int create, struct gfs2_glock **glp);
 void gfs2_glock_hold(struct gfs2_glock *gl);
 void gfs2_glock_put_nolock(struct gfs2_glock *gl);
-int gfs2_glock_put(struct gfs2_glock *gl);
+void gfs2_glock_put(struct gfs2_glock *gl);
 void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
 		      struct gfs2_holder *gh);
 void gfs2_holder_reinit(unsigned int state, unsigned flags,
@@ -223,25 +223,22 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
 	return error;
 }
 
-/* Lock Value Block functions */
-
-int gfs2_lvb_hold(struct gfs2_glock *gl);
-void gfs2_lvb_unhold(struct gfs2_glock *gl);
-
-void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
-void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
-void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
-void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
-void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
-void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-
-int __init gfs2_glock_init(void);
-void gfs2_glock_exit(void);
-
-int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
-void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
-int gfs2_register_debugfs(void);
-void gfs2_unregister_debugfs(void);
+extern void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+extern void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
+extern void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
+extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
+extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
+extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
+extern void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
+extern void gfs2_glock_free(struct rcu_head *rcu);
+
+extern int __init gfs2_glock_init(void);
+extern void gfs2_glock_exit(void);
+
+extern int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
+extern void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
+extern int gfs2_register_debugfs(void);
+extern void gfs2_unregister_debugfs(void);
 
 extern const struct lm_lockops gfs2_dlm_ops;
@@ -206,8 +206,17 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
 static int inode_go_demote_ok(const struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
+	struct gfs2_holder *gh;
 
 	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
 		return 0;
+
+	if (!list_empty(&gl->gl_holders)) {
+		gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+		if (gh->gh_list.next != &gl->gl_holders)
+			return 0;
+	}
+
 	return 1;
 }
@@ -271,19 +280,6 @@ static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
 	return 0;
 }
 
-/**
- * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int rgrp_go_demote_ok(const struct gfs2_glock *gl)
-{
-	const struct address_space *mapping = (const struct address_space *)(gl + 1);
-	return !mapping->nrpages;
-}
-
 /**
  * rgrp_go_lock - operation done after an rgrp lock is locked by
  *                a first holder on this node.
@@ -410,7 +406,6 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
 	.go_xmote_th = rgrp_go_sync,
 	.go_inval = rgrp_go_inval,
-	.go_demote_ok = rgrp_go_demote_ok,
 	.go_lock = rgrp_go_lock,
 	.go_unlock = rgrp_go_unlock,
 	.go_dump = gfs2_rgrp_dump,
@@ -15,6 +15,8 @@
 #include <linux/workqueue.h>
 #include <linux/dlm.h>
 #include <linux/buffer_head.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
 
 #define DIO_WAIT		0x00000010
 #define DIO_METADATA		0x00000020
@@ -201,7 +203,7 @@ enum {
 };
 
 struct gfs2_glock {
-	struct hlist_node gl_list;
+	struct hlist_bl_node gl_list;
 	unsigned long gl_flags;		/* GLF_... */
 	struct lm_lockname gl_name;
 	atomic_t gl_ref;
@@ -234,6 +236,7 @@ struct gfs2_glock {
 	atomic_t gl_ail_count;
 	struct delayed_work gl_work;
 	struct work_struct gl_delete;
+	struct rcu_head gl_rcu;
 };
 
 #define GFS2_MIN_LVB_SIZE 32	/* Min size of LVB that gfs2 supports */
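
With gl_list switched to an hlist_bl_node, insertion and removal only need the per-bucket bit lock rather than a table-wide lock. Continuing the hypothetical my_obj/my_table sketch from the top of the page (and assuming <linux/slab.h> for kfree()), the update side of the pattern looks roughly like this; again, it is illustrative, not the glock code:

/* Insertion: serialise against other writers with the bucket bit lock only. */
static void my_insert(struct my_obj *obj)
{
	struct hlist_bl_head *head = &my_table[obj->m_key & (MY_HASH_SIZE - 1)];

	hlist_bl_lock(head);
	hlist_bl_add_head_rcu(&obj->m_list, head);
	hlist_bl_unlock(head);
}

static void my_free_rcu(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, m_rcu));
}

/* Removal: unhash under the bit lock, then free only after a grace period. */
static void my_remove(struct my_obj *obj)
{
	struct hlist_bl_head *head = &my_table[obj->m_key & (MY_HASH_SIZE - 1)];

	hlist_bl_lock(head);
	hlist_bl_del_rcu(&obj->m_list);
	hlist_bl_unlock(head);
	call_rcu(&obj->m_rcu, my_free_rcu);	/* readers may still be walking the chain */
}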
@@ -22,7 +22,6 @@ static void gdlm_ast(void *arg)
 {
 	struct gfs2_glock *gl = arg;
 	unsigned ret = gl->gl_state;
-	struct gfs2_sbd *sdp = gl->gl_sbd;
 
 	BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
 
@@ -31,12 +30,7 @@ static void gdlm_ast(void *arg)
 
 	switch (gl->gl_lksb.sb_status) {
 	case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
-		if (gl->gl_ops->go_flags & GLOF_ASPACE)
-			kmem_cache_free(gfs2_glock_aspace_cachep, gl);
-		else
-			kmem_cache_free(gfs2_glock_cachep, gl);
-		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-			wake_up(&sdp->sd_glock_wait);
+		call_rcu(&gl->gl_rcu, gfs2_glock_free);
 		return;
 	case -DLM_ECANCEL: /* Cancel while getting lock */
 		ret |= LM_OUT_CANCELED;
@@ -164,16 +158,14 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 			GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
 }
 
-static void gdlm_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+static void gdlm_put_lock(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_sbd;
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	int error;
 
 	if (gl->gl_lksb.sb_lkid == 0) {
-		kmem_cache_free(cachep, gl);
-		if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-			wake_up(&sdp->sd_glock_wait);
+		call_rcu(&gl->gl_rcu, gfs2_glock_free);
 		return;
 	}
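
gfs2_glock_free() is declared among the new extern declarations above but defined in a part of the diff that is not shown here. Judging from the freeing code removed from gdlm_ast() and gdlm_put_lock(), it presumably consolidates that logic into a single RCU callback; the following is a sketch of that assumption, not a copy of the commit's implementation:

/* Hypothetical sketch of the RCU callback set up via call_rcu() above. */
void gfs2_glock_free(struct rcu_head *rcu)
{
	/* Recover the glock from the gl_rcu field added to struct gfs2_glock */
	struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);
	struct gfs2_sbd *sdp = gl->gl_sbd;

	/* Same cache selection as the code removed from gdlm_ast() */
	if (gl->gl_ops->go_flags & GLOF_ASPACE)
		kmem_cache_free(gfs2_glock_aspace_cachep, gl);
	else
		kmem_cache_free(gfs2_glock_cachep, gl);

	/* Last glock disposed of: wake up anyone waiting in the unmount path */
	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
		wake_up(&sdp->sd_glock_wait);
}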
@@ -91,7 +91,8 @@ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
 	}
 	bd->bd_ail = ai;
 	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
-	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
+	if (test_and_clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags))
+		gfs2_glock_schedule_for_reclaim(bd->bd_gl);
 	trace_gfs2_pin(bd, 0);
 	gfs2_log_unlock(sdp);
 	unlock_buffer(bh);
@@ -14,6 +14,8 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/gfs2_ondisk.h>
+#include <linux/rcupdate.h>
+#include <linux/rculist_bl.h>
 #include <asm/atomic.h>
 
 #include "gfs2.h"
@@ -45,7 +47,7 @@ static void gfs2_init_glock_once(void *foo)
 {
 	struct gfs2_glock *gl = foo;
 
-	INIT_HLIST_NODE(&gl->gl_list);
+	INIT_HLIST_BL_NODE(&gl->gl_list);
 	spin_lock_init(&gl->gl_spin);
 	INIT_LIST_HEAD(&gl->gl_holders);
 	INIT_LIST_HEAD(&gl->gl_lru);
@@ -198,6 +200,8 @@ static void __exit exit_gfs2_fs(void)
 	unregister_filesystem(&gfs2meta_fs_type);
 	destroy_workqueue(gfs_recovery_wq);
 
+	rcu_barrier();
+
 	kmem_cache_destroy(gfs2_quotad_cachep);
 	kmem_cache_destroy(gfs2_rgrpd_cachep);
 	kmem_cache_destroy(gfs2_bufdata_cachep);
@@ -928,12 +928,9 @@ static const match_table_t nolock_tokens = {
 	{ Opt_err, NULL },
 };
 
-static void nolock_put_lock(struct kmem_cache *cachep, struct gfs2_glock *gl)
+static void nolock_put_lock(struct gfs2_glock *gl)
 {
-	struct gfs2_sbd *sdp = gl->gl_sbd;
-	kmem_cache_free(cachep, gl);
-	if (atomic_dec_and_test(&sdp->sd_glock_disposal))
-		wake_up(&sdp->sd_glock_wait);
+	call_rcu(&gl->gl_rcu, gfs2_glock_free);
 }
 
 static const struct lm_lockops nolock_ops = {