Commit 47a25380 authored by Steven Whitehouse

GFS2: Merge glock state fields into a bitfield

We can only merge the fields into a bitfield if the locking
rules for them are the same. In this case gl_spin covers all
of the fields (write side) but a couple of them are used
with GLF_LOCK as the read side lock, which should be ok
since we know that the field in question won't be changing
at the time.
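
To illustrate that rule (a minimal sketch, not the GFS2 code; the names below are made up for the example): members of a C bitfield share one storage unit, so every store to a member is a read-modify-write of the whole word, and two writers updating different members can silently lose an update unless they take the same lock.

#include <linux/spinlock.h>

/* Hypothetical example structure, not the real struct gfs2_glock */
struct example_lock {
	spinlock_t lock;		/* write-side lock covering the whole bitfield */
	unsigned int state:2,		/* these members share one storage unit */
		     target:2,
		     reply:8;
};

/* Every writer must hold ->lock: storing to ->target rewrites the word
 * that also holds ->state and ->reply. */
static void example_set_target(struct example_lock *e, unsigned int t)
{
	spin_lock(&e->lock);
	e->target = t;
	spin_unlock(&e->lock);
}

/* A reader that already excludes writers by some other means (for the
 * glock code, holding GLF_LOCK) may read a member without ->lock,
 * since the word cannot change underneath it at that time. */
static unsigned int example_get_target(struct example_lock *e)
{
	return e->target;
}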

The gl_req setting has to be done earlier (in glock.c) in order
to place it under gl_spin. The gl_reply setting also has to be
brought under gl_spin in order to comply with the new rules.

This saves 4*sizeof(unsigned int) per glock.
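
(Worked out, assuming a typical 4-byte unsigned int: the five separate fields gl_state, gl_target, gl_reply, gl_req and gl_demote_state previously took 5 * sizeof(unsigned int) = 20 bytes; the replacement bitfield needs 2 + 2 + 2 + 2 + 8 = 16 bits, which fits in a single unsigned int, so the structure shrinks by 4 * sizeof(unsigned int) = 16 bytes per glock.)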
Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
parent e06dfc49
@@ -567,6 +567,7 @@ __acquires(&gl->gl_spin)
 		set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
 		do_error(gl, 0); /* Fail queued try locks */
 	}
+	gl->gl_req = target;
 	spin_unlock(&gl->gl_spin);
 	if (glops->go_xmote_th)
 		glops->go_xmote_th(gl);
@@ -1353,24 +1354,28 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
+ * The gl_reply field is under the gl_spin lock so that it is ok
+ * to use a bitfield shared with other glock state fields.
  */
 
 void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
 	struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
 
+	spin_lock(&gl->gl_spin);
 	gl->gl_reply = ret;
 
 	if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
-		spin_lock(&gl->gl_spin);
 		if (gfs2_should_freeze(gl)) {
 			set_bit(GLF_FROZEN, &gl->gl_flags);
 			spin_unlock(&gl->gl_spin);
 			return;
 		}
-		spin_unlock(&gl->gl_spin);
 	}
+
+	spin_unlock(&gl->gl_spin);
 	set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
 	smp_wmb();
 	gfs2_glock_hold(gl);
 	if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
 		gfs2_glock_put(gl);
...
@@ -207,12 +207,14 @@ struct gfs2_glock {
 	spinlock_t gl_spin;
 
-	unsigned int gl_state;
-	unsigned int gl_target;
-	unsigned int gl_reply;
+	/* State fields protected by gl_spin */
+	unsigned int gl_state:2,	/* Current state */
+		     gl_target:2,	/* Target state */
+		     gl_demote_state:2,	/* State requested by remote node */
+		     gl_req:2,		/* State in last dlm request */
+		     gl_reply:8;	/* Last reply from the dlm */
+
 	unsigned int gl_hash;
-	unsigned int gl_req;
-	unsigned int gl_demote_state; /* state requested by remote node */
 	unsigned long gl_demote_time; /* time of first demote request */
 	struct list_head gl_holders;
...
@@ -153,7 +153,6 @@ static int gdlm_lock(struct gfs2_glock *gl, unsigned int req_state,
 	int req;
 	u32 lkf;
 
-	gl->gl_req = req_state;
 	req = make_mode(req_state);
 	lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req);
...