Commit a72d2401 authored by Bob Peterson

gfs2: Allow some glocks to be used during withdraw

We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
when we're withdrawn. For example, to maintain metadata integrity, we should
disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
iopen or the transaction glocks, may be safely used because none of their
metadata goes through the journal. So in general, we should disallow all
glocks with an address space, and allow all the others. One exception is:
we need to allow our active journal to be demoted so others may recover it.
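
In code terms, this policy corresponds to the new glock_blocked_by_withdraw() helper introduced by the patch (shown in the diff below). It is reproduced here with explanatory comments added for orientation; the comments are not part of the patch:

    static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
    {
            struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

            /* Not withdrawn: all glocks remain usable. */
            if (likely(!gfs2_withdrawn(sdp)))
                    return false;
            /* Glocks whose type is flagged GLOF_NONDISK never carry
             * journaled metadata, so they remain usable as well. */
            if (gl->gl_ops->go_flags & GLOF_NONDISK)
                    return false;
            /* Exception: no journal descriptor yet, or this is the glock
             * for our own journal, which must stay usable so another
             * node can recover it. */
            if (!sdp->sd_jdesc ||
                gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
                    return false;
            /* Everything else is blocked while withdrawn. */
            return true;
    }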

Allowing glocks after withdraw gives us the ability to take appropriate
action (in a following patch) to have our journal properly replayed by
another node rather than just abandoning the current transactions and
pretending nothing bad happened, leaving the other nodes free to modify
the blocks we had in our journal, which may result in file system
corruption.
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent 0d91061a
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -133,6 +133,33 @@ static void gfs2_glock_dealloc(struct rcu_head *rcu)
         }
 }
 
+/**
+ * glock_blocked_by_withdraw - determine if we can still use a glock
+ * @gl: the glock
+ *
+ * We need to allow some glocks to be enqueued, dequeued, promoted, and demoted
+ * when we're withdrawn. For example, to maintain metadata integrity, we should
+ * disallow the use of inode and rgrp glocks when withdrawn. Other glocks, like
+ * iopen or the transaction glocks may be safely used because none of their
+ * metadata goes through the journal. So in general, we should disallow all
+ * glocks that are journaled, and allow all the others. One exception is:
+ * we need to allow our active journal to be promoted and demoted so others
+ * may recover it and we can reacquire it when they're done.
+ */
+static bool glock_blocked_by_withdraw(struct gfs2_glock *gl)
+{
+        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
+
+        if (likely(!gfs2_withdrawn(sdp)))
+                return false;
+        if (gl->gl_ops->go_flags & GLOF_NONDISK)
+                return false;
+        if (!sdp->sd_jdesc ||
+            gl->gl_name.ln_number == sdp->sd_jdesc->jd_no_addr)
+                return false;
+        return true;
+}
+
 void gfs2_glock_free(struct gfs2_glock *gl)
 {
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -549,8 +576,7 @@ __acquires(&gl->gl_lockref.lock)
         unsigned int lck_flags = (unsigned int)(gh ? gh->gh_flags : 0);
         int ret;
 
-        if (unlikely(gfs2_withdrawn(sdp)) &&
-            target != LM_ST_UNLOCKED)
+        if (target != LM_ST_UNLOCKED && glock_blocked_by_withdraw(gl))
                 return;
         lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
                       LM_FLAG_PRIORITY);
@@ -1194,10 +1220,9 @@ __acquires(&gl->gl_lockref.lock)
 int gfs2_glock_nq(struct gfs2_holder *gh)
 {
         struct gfs2_glock *gl = gh->gh_gl;
-        struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
         int error = 0;
 
-        if (unlikely(gfs2_withdrawn(sdp)))
+        if (glock_blocked_by_withdraw(gl))
                 return -EIO;
 
         if (test_bit(GLF_LRU, &gl->gl_flags))
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -579,6 +579,7 @@ static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 
 const struct gfs2_glock_operations gfs2_meta_glops = {
         .go_type = LM_TYPE_META,
+        .go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
@@ -605,30 +606,33 @@ const struct gfs2_glock_operations gfs2_freeze_glops = {
         .go_xmote_bh = freeze_go_xmote_bh,
         .go_demote_ok = freeze_go_demote_ok,
         .go_type = LM_TYPE_NONDISK,
+        .go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_iopen_glops = {
         .go_type = LM_TYPE_IOPEN,
         .go_callback = iopen_go_callback,
-        .go_flags = GLOF_LRU,
+        .go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_flock_glops = {
         .go_type = LM_TYPE_FLOCK,
-        .go_flags = GLOF_LRU,
+        .go_flags = GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_nondisk_glops = {
         .go_type = LM_TYPE_NONDISK,
+        .go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_quota_glops = {
         .go_type = LM_TYPE_QUOTA,
-        .go_flags = GLOF_LVB | GLOF_LRU,
+        .go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations gfs2_journal_glops = {
         .go_type = LM_TYPE_JOURNAL,
+        .go_flags = GLOF_NONDISK,
 };
 
 const struct gfs2_glock_operations *gfs2_glops_list[] = {
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -244,9 +244,10 @@ struct gfs2_glock_operations {
         void (*go_callback)(struct gfs2_glock *gl, bool remote);
         const int go_type;
         const unsigned long go_flags;
-#define GLOF_ASPACE 1
-#define GLOF_LVB 2
-#define GLOF_LRU 4
+#define GLOF_ASPACE 1 /* address space attached */
+#define GLOF_LVB 2 /* Lock Value Block attached */
+#define GLOF_LRU 4 /* LRU managed */
+#define GLOF_NONDISK 8 /* not I/O related */
 };
 
 enum {
@@ -541,6 +542,7 @@ struct gfs2_jdesc {
         struct list_head jd_revoke_list;
         unsigned int jd_replay_tail;
 
+        u64 jd_no_addr;
 };
 
 struct gfs2_statfs_change_host {
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -552,6 +552,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
         mutex_lock(&sdp->sd_jindex_mutex);
 
         for (;;) {
+                struct gfs2_inode *jip;
+
                 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, ji_gh);
                 if (error)
                         break;
@@ -591,6 +593,8 @@ static int gfs2_jindex_hold(struct gfs2_sbd *sdp, struct gfs2_holder *ji_gh)
 
                 spin_lock(&sdp->sd_jindex_spin);
                 jd->jd_jid = sdp->sd_journals++;
+                jip = GFS2_I(jd->jd_inode);
+                jd->jd_no_addr = jip->i_no_addr;
                 list_add_tail(&jd->jd_list, &sdp->sd_jindex_list);
                 spin_unlock(&sdp->sd_jindex_spin);
         }