Commit 6f6597ba authored by Andreas Gruenbacher, committed by Bob Peterson

gfs2: Protect gl->gl_object by spin lock

Put all remaining accesses to gl->gl_object under the
gl->gl_lockref.lock spinlock to prevent races.
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
parent 4fd1a579
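
The hunks below replace open-coded gl->gl_object assignments with a glock_set_object() helper. That helper's definition is not among the hunks shown here; judging from the gfs2_glock2rgrp() reader added below, it presumably just wraps the pointer update in the gl_lockref.lock spinlock, roughly:

/* Hypothetical sketch of the setter assumed by the hunks below: take the
 * glock's lockref spinlock around the gl_object update so that readers
 * such as gfs2_glock2rgrp() never observe the pointer mid-update. */
static inline void glock_set_object(struct gfs2_glock *gl, void *object)
{
	spin_lock(&gl->gl_lockref.lock);
	gl->gl_object = object;
	spin_unlock(&gl->gl_lockref.lock);
}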
@@ -970,7 +970,7 @@ static int sweep_bh_for_rgrps(struct gfs2_inode *ip, struct gfs2_holder *rd_gh,
 			continue;
 		bn = be64_to_cpu(*p);
 		if (gfs2_holder_initialized(rd_gh)) {
-			rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+			rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
 			gfs2_assert_withdraw(sdp,
 				     gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
 		} else {
...
@@ -2032,8 +2032,8 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
 		rg_blocks += rgd->rd_length;
 	}
...
@@ -137,7 +137,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
  *
  * Called when demoting or unlocking an EX glock. We must flush
  * to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
  */
 static void rgrp_go_sync(struct gfs2_glock *gl)
@@ -184,7 +184,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	struct address_space *mapping = &sdp->sd_aspace;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	if (rgd)
 		gfs2_rgrp_brelse(rgd);
@@ -209,6 +209,17 @@ static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
 	return ip;
 }
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+	struct gfs2_rgrpd *rgd;
+
+	spin_lock(&gl->gl_lockref.lock);
+	rgd = gl->gl_object;
+	spin_unlock(&gl->gl_lockref.lock);
+
+	return rgd;
+}
+
 static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
 {
 	if (!ip)
@@ -566,7 +577,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
-	struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+	struct gfs2_inode *ip = gl->gl_object;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
...
@@ -857,5 +857,7 @@ static inline void gfs2_sbstats_inc(const struct gfs2_glock *gl, int which)
 	preempt_enable();
 }
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
 #endif /* __INCORE_DOT_H__ */
@@ -202,14 +202,14 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
 fail_refresh:
 	ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
-	ip->i_iopen_gh.gh_gl->gl_object = NULL;
+	glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 	gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 fail_put:
 	if (io_gl)
 		gfs2_glock_put(io_gl);
 	if (gfs2_holder_initialized(&i_gh))
 		gfs2_glock_dq_uninit(&i_gh);
-	ip->i_gl->gl_object = NULL;
+	glock_set_object(ip->i_gl, NULL);
 fail:
 	iget_failed(inode);
 	return ERR_PTR(error);
...
@@ -706,7 +706,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_free_inode;
-	ip->i_gl->gl_object = ip;
+	glock_set_object(ip->i_gl, ip);
 	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
 	if (error)
 		goto fail_free_inode;
@@ -732,7 +732,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
 	if (error)
 		goto fail_gunlock2;
-	ip->i_iopen_gh.gh_gl->gl_object = ip;
+	glock_set_object(ip->i_iopen_gh.gh_gl, ip);
 	gfs2_glock_put(io_gl);
 	gfs2_set_iop(inode);
 	insert_inode_hash(inode);
...
@@ -71,7 +71,7 @@ static void maybe_release_space(struct gfs2_bufdata *bd)
 {
 	struct gfs2_glock *gl = bd->bd_gl;
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct gfs2_rgrpd *rgd = gl->gl_object;
+	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
 	struct gfs2_bitmap *bi = rgd->rd_bits + index;
...
@@ -705,9 +705,7 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
 		rb_erase(n, &sdp->sd_rindex_tree);
 		if (gl) {
-			spin_lock(&gl->gl_lockref.lock);
-			gl->gl_object = NULL;
-			spin_unlock(&gl->gl_lockref.lock);
+			glock_set_object(gl, NULL);
 			gfs2_glock_add_to_lru(gl);
 			gfs2_glock_put(gl);
 		}
@@ -917,7 +915,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
 	error = rgd_insert(rgd);
 	spin_unlock(&sdp->sd_rindex_spin);
 	if (!error) {
-		rgd->rd_gl->gl_object = rgd;
+		glock_set_object(rgd->rd_gl, rgd);
 		rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
 		rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
 						    rgd->rd_length) * bsize) - 1;
...
@@ -1105,9 +1105,12 @@ static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host
 				gfs2_holder_uninit(gh);
 				error = err;
 			} else {
-				if (!error)
-					error = statfs_slow_fill(
-						gh->gh_gl->gl_object, sc);
+				if (!error) {
+					struct gfs2_rgrpd *rgd =
+						gfs2_glock2rgrp(gh->gh_gl);
+
+					error = statfs_slow_fill(rgd, sc);
+				}
 				gfs2_glock_dq_uninit(gh);
 			}
 		}
@@ -1637,7 +1640,7 @@ static void gfs2_evict_inode(struct inode *inode)
 	gfs2_glock_put(ip->i_gl);
 	ip->i_gl = NULL;
 	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
-		ip->i_iopen_gh.gh_gl->gl_object = NULL;
+		glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
 		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
 		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
 	}
...
@@ -1327,8 +1327,8 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
 	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
 	for (x = 0; x < rlist.rl_rgrps; x++) {
-		struct gfs2_rgrpd *rgd;
-		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+		struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
 		rg_blocks += rgd->rd_length;
 	}
...