Commit 78805cbe authored by Linus Torvalds

Merge tag 'gfs2-v5.15-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Fix a locking order inversion between the inode and iopen glocks in
   gfs2_inode_lookup.

 - Implement proper queuing of glock holders for glocks that require
   instantiation (like reading an inode or bitmap blocks from disk).
   Before, multiple glock holders could race with each other, exposing
   half-initialized objects; the GL_SKIP flag further exacerbated this
   problem. (A sketch of the new serialization pattern follows this
   list.)

 - Fix a rare deadlock between inode lookup / creation and remote delete
   work.

 - Fix a rare scheduling-while-atomic bug in dlm during glock hash table
   walks.

 - Various other minor fixes and cleanups.
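
The queuing change hinges on two new glock flags, GLF_INSTANTIATE_NEEDED
and GLF_INSTANTIATE_IN_PROG. Below is a minimal sketch of the handshake,
mirroring gfs2_instantiate() in the diff that follows; the helper name
instantiate_once() and its read_in/object parameters are illustrative
stand-ins (read_in plays the role of glops->go_instantiate()), and the
flag values are the ones added to the glock-flags enum below:

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define GLF_INSTANTIATE_NEEDED	 2	/* object must still be read in */
#define GLF_INSTANTIATE_IN_PROG	12	/* a holder is reading it in now */

static int instantiate_once(unsigned long *flags,
			    int (*read_in)(void *object), void *object)
{
	int ret;

again:
	if (!test_bit(GLF_INSTANTIATE_NEEDED, flags))
		return 0;		/* already instantiated */
	if (test_bit(GLF_INSTANTIATE_IN_PROG, flags)) {
		/*
		 * Another holder is instantiating; wait for it, then
		 * re-check: its attempt may have failed, e.g. when it
		 * had no inode to instantiate into yet.
		 */
		wait_on_bit(flags, GLF_INSTANTIATE_IN_PROG,
			    TASK_UNINTERRUPTIBLE);
		goto again;
	}
	set_bit(GLF_INSTANTIATE_IN_PROG, flags);
	ret = read_in(object);
	if (!ret)
		clear_bit(GLF_INSTANTIATE_NEEDED, flags);
	clear_bit(GLF_INSTANTIATE_IN_PROG, flags);
	smp_mb__after_atomic();	/* order the clears before the wake-up */
	wake_up_bit(flags, GLF_INSTANTIATE_IN_PROG);
	return ret;
}

Only the first holder performs the read; later holders sleep on
GLF_INSTANTIATE_IN_PROG and then re-check GLF_INSTANTIATE_NEEDED, which
closes the race that previously exposed half-initialized inodes and
resource groups.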

* tag 'gfs2-v5.15-rc5-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2: (21 commits)
  gfs2: Fix unused value warning in do_gfs2_set_flags()
  gfs2: check context in gfs2_glock_put
  gfs2: Fix glock_hash_walk bugs
  gfs2: Cancel remote delete work asynchronously
  gfs2: set glock object after nq
  gfs2: remove RDF_UPTODATE flag
  gfs2: Eliminate GIF_INVALID flag
  gfs2: fix GL_SKIP node_scope problems
  gfs2: split glock instantiation off from do_promote
  gfs2: further simplify do_promote
  gfs2: re-factor function do_promote
  gfs2: Remove 'first' trace_gfs2_promote argument
  gfs2: change go_lock to go_instantiate
  gfs2: dump glocks from gfs2_consist_OBJ_i
  gfs2: dequeue iopen holder in gfs2_inode_lookup error
  gfs2: Save ip from gfs2_glock_nq_init
  gfs2: Allow append and immutable bits to coexist
  gfs2: Switch some BUG_ON to GLOCK_BUG_ON for debug
  gfs2: move GL_SKIP check from glops to do_promote
  gfs2: Add GL_SKIP holder flag to dump_holder
  ...
parents c03098d4 e34e6f81
......@@ -213,11 +213,9 @@ void gfs2_set_inode_flags(struct inode *inode)
* @inode: The inode
* @reqflags: The flags to set
* @mask: Indicates which flags are valid
* @fsflags: The FS_* inode flags passed in
*
*/
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask,
const u32 fsflags)
static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask)
{
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_sbd *sdp = GFS2_SB(inode);
......@@ -236,11 +234,6 @@ static int do_gfs2_set_flags(struct inode *inode, u32 reqflags, u32 mask,
if ((new_flags ^ flags) == 0)
goto out;
error = -EPERM;
if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
goto out;
if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
goto out;
if (!IS_IMMUTABLE(inode)) {
error = gfs2_permission(&init_user_ns, inode, MAY_WRITE);
if (error)
......@@ -313,7 +306,7 @@ int gfs2_fileattr_set(struct user_namespace *mnt_userns,
mask &= ~(GFS2_DIF_TOPDIR | GFS2_DIF_INHERIT_JDATA);
}
return do_gfs2_set_flags(inode, gfsflags, mask, fsflags);
return do_gfs2_set_flags(inode, gfsflags, mask);
}
static int gfs2_getlabel(struct file *filp, char __user *label)
......
......@@ -301,6 +301,9 @@ void gfs2_glock_queue_put(struct gfs2_glock *gl)
void gfs2_glock_put(struct gfs2_glock *gl)
{
/* last put could call sleepable dlm api */
might_sleep();
if (lockref_put_or_lock(&gl->gl_lockref))
return;
......@@ -472,6 +475,51 @@ find_first_strong_holder(struct gfs2_glock *gl)
return NULL;
}
/**
 * gfs2_instantiate - Call the glops instantiate function
 * @gh: The glock holder
 *
 * Returns: 0 if instantiate was successful, 1 if a type-specific operation
 * is still underway, or a negative error code.
 */
int gfs2_instantiate(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
int ret;
again:
if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
return 0;
/*
 * Since the lockref lock is dropped while instantiating, we set a
 * flag to indicate that an instantiate is in progress.
 */
if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
TASK_UNINTERRUPTIBLE);
/*
 * Here we just waited for a different instantiate to finish.
 * But that may not have been successful, for instance when a
 * process locks an inode glock _before_ it has an actual inode
 * to instantiate into. So we check again. This process might
 * have an inode to instantiate, so it might succeed.
 */
goto again;
}
set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
ret = glops->go_instantiate(gh);
if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
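/* Make the flag updates visible before waking any wait_on_bit() waiters. */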
smp_mb__after_atomic();
wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
return ret;
}
/**
* do_promote - promote as many requests as possible on the current queue
* @gl: The glock
......@@ -484,56 +532,59 @@ static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{
const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh, *tmp, *first_gh;
bool incompat_holders_demoted = false;
bool lock_released;
int ret;
restart:
first_gh = find_first_strong_holder(gl);
list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
if (!test_bit(HIF_WAIT, &gh->gh_iflags))
lock_released = false;
if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue;
if (may_grant(gl, first_gh, gh)) {
if (!incompat_holders_demoted) {
demote_incompat_holders(gl, first_gh);
incompat_holders_demoted = true;
first_gh = gh;
}
if (gh->gh_list.prev == &gl->gl_holders &&
glops->go_lock) {
spin_unlock(&gl->gl_lockref.lock);
/* FIXME: eliminate this eventually */
ret = glops->go_lock(gh);
spin_lock(&gl->gl_lockref.lock);
if (ret) {
if (ret == 1)
return 2;
gh->gh_error = ret;
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
goto restart;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh, 1);
if (!may_grant(gl, first_gh, gh)) {
/*
* If we get here, it means we may not grant this holder for
* some reason. If this holder is the head of the list, it
* means we have a blocked holder at the head, so return 1.
*/
if (gh->gh_list.prev == &gl->gl_holders)
return 1;
do_error(gl, 0);
break;
}
if (!incompat_holders_demoted) {
demote_incompat_holders(gl, first_gh);
incompat_holders_demoted = true;
first_gh = gh;
}
if (test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags) &&
!(gh->gh_flags & GL_SKIP) && gl->gl_ops->go_instantiate) {
lock_released = true;
spin_unlock(&gl->gl_lockref.lock);
ret = gfs2_instantiate(gh);
spin_lock(&gl->gl_lockref.lock);
if (ret) {
if (ret == 1)
return 2;
gh->gh_error = ret;
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
goto restart;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh, 0);
gfs2_holder_wake(gh);
continue;
}
set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh);
gfs2_holder_wake(gh);
/*
 * If we released the gl_lockref.lock the holders list may have
 * changed. For that reason, we start again at the start of
 * the holders queue.
 */
if (lock_released)
goto restart;
}
return 0;
}
......@@ -909,7 +960,7 @@ static void gfs2_glock_poke(struct gfs2_glock *gl)
struct gfs2_holder gh;
int error;
gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh);
__gfs2_holder_init(gl, LM_ST_SHARED, flags, &gh, _RET_IP_);
error = gfs2_glock_nq(&gh);
if (!error)
gfs2_glock_dq(&gh);
......@@ -1144,7 +1195,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
atomic_inc(&sdp->sd_glock_disposal);
gl->gl_node.next = NULL;
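/* Glock types with a go_instantiate op start out needing instantiation: */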
gl->gl_flags = 0;
gl->gl_flags = glops->go_instantiate ? BIT(GLF_INSTANTIATE_NEEDED) : 0;
gl->gl_name = name;
lockdep_set_subclass(&gl->gl_lockref.lock, glops->go_subclass);
gl->gl_lockref.count = 1;
......@@ -1206,12 +1257,12 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
*
*/
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
struct gfs2_holder *gh)
void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, u16 flags,
struct gfs2_holder *gh, unsigned long ip)
{
INIT_LIST_HEAD(&gh->gh_list);
gh->gh_gl = gl;
gh->gh_ip = _RET_IP_;
gh->gh_ip = ip;
gh->gh_owner_pid = get_pid(task_pid(current));
gh->gh_state = state;
gh->gh_flags = flags;
......@@ -2053,10 +2104,10 @@ static void glock_hash_walk(glock_examiner examiner, const struct gfs2_sbd *sdp)
do {
rhashtable_walk_start(&iter);
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl))
if (gl->gl_name.ln_sbd == sdp &&
lockref_get_not_dead(&gl->gl_lockref))
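/*
 * Examiners such as thaw_glock() and clear_glock() now take their
 * own glock reference where needed, so the walk itself no longer
 * grabs and drops one (the final put may sleep in dlm).
 */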
while ((gl = rhashtable_walk_next(&iter)) && !IS_ERR(gl)) {
if (gl->gl_name.ln_sbd == sdp)
examiner(gl);
}
rhashtable_walk_stop(&iter);
} while (cond_resched(), gl == ERR_PTR(-EAGAIN));
......@@ -2079,7 +2130,7 @@ bool gfs2_queue_delete_work(struct gfs2_glock *gl, unsigned long delay)
void gfs2_cancel_delete_work(struct gfs2_glock *gl)
{
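/*
 * Cancel without waiting: blocking on the delete work here can
 * deadlock with inode lookup/creation (see "gfs2: Cancel remote
 * delete work asynchronously").
 */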
if (cancel_delayed_work_sync(&gl->gl_delete)) {
if (cancel_delayed_work(&gl->gl_delete)) {
clear_bit(GLF_PENDING_DELETE, &gl->gl_flags);
gfs2_glock_put(gl);
}
......@@ -2098,7 +2149,6 @@ static void flush_delete_work(struct gfs2_glock *gl)
&gl->gl_delete, 0);
}
}
gfs2_glock_queue_work(gl, 0);
}
void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
......@@ -2115,10 +2165,10 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp)
static void thaw_glock(struct gfs2_glock *gl)
{
if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags)) {
gfs2_glock_put(gl);
if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
return;
if (!lockref_get_not_dead(&gl->gl_lockref))
return;
}
set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
gfs2_glock_queue_work(gl, 0);
}
......@@ -2134,9 +2184,12 @@ static void clear_glock(struct gfs2_glock *gl)
gfs2_glock_remove_from_lru(gl);
spin_lock(&gl->gl_lockref.lock);
if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
__gfs2_glock_queue_work(gl, 0);
if (!__lockref_is_dead(&gl->gl_lockref)) {
gl->gl_lockref.count++;
if (gl->gl_state != LM_ST_UNLOCKED)
handle_callback(gl, LM_ST_UNLOCKED, 0, false);
__gfs2_glock_queue_work(gl, 0);
}
spin_unlock(&gl->gl_lockref.lock);
}
......@@ -2238,6 +2291,8 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
*p++ = 'W';
if (test_bit(HIF_MAY_DEMOTE, &iflags))
*p++ = 'D';
if (flags & GL_SKIP)
*p++ = 's';
*p = 0;
return buf;
}
......@@ -2306,6 +2361,10 @@ static const char *gflags2str(char *buf, const struct gfs2_glock *gl)
*p++ = 'P';
if (test_bit(GLF_FREEING, gflags))
*p++ = 'x';
if (test_bit(GLF_INSTANTIATE_NEEDED, gflags))
*p++ = 'n';
if (test_bit(GLF_INSTANTIATE_IN_PROG, gflags))
*p++ = 'N';
*p = 0;
return buf;
}
......
......@@ -190,13 +190,21 @@ extern int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
extern void gfs2_glock_hold(struct gfs2_glock *gl);
extern void gfs2_glock_put(struct gfs2_glock *gl);
extern void gfs2_glock_queue_put(struct gfs2_glock *gl);
extern void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh);
extern void __gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh,
unsigned long ip);
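/* Inline wrapper so that gh->gh_ip records the actual caller via _RET_IP_. */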
static inline void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state,
u16 flags, struct gfs2_holder *gh) {
__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
}
extern void gfs2_holder_reinit(unsigned int state, u16 flags,
struct gfs2_holder *gh);
extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh);
......@@ -241,7 +249,7 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
{
int error;
gfs2_holder_init(gl, state, flags, gh);
__gfs2_holder_init(gl, state, flags, gh, _RET_IP_);
error = gfs2_glock_nq(gh);
if (error)
......
......@@ -228,7 +228,7 @@ static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
gfs2_rgrp_brelse(rgd);
WARN_ON_ONCE(!(flags & DIO_METADATA));
truncate_inode_pages_range(mapping, start, end);
rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
}
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
......@@ -356,7 +356,7 @@ static void inode_go_inval(struct gfs2_glock *gl, int flags)
struct address_space *mapping = gfs2_glock2aspace(gl);
truncate_inode_pages(mapping, 0);
if (ip) {
set_bit(GIF_INVALID, &ip->i_flags);
set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
forget_all_cached_acls(&ip->i_inode);
security_inode_invalidate_secctx(&ip->i_inode);
gfs2_dir_hash_inval(ip);
......@@ -476,33 +476,29 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
error = gfs2_dinode_in(ip, dibh->b_data);
brelse(dibh);
clear_bit(GIF_INVALID, &ip->i_flags);
return error;
}
/**
* inode_go_lock - operation done after an inode lock is locked by a process
* inode_go_instantiate - read in an inode if necessary
* @gh: The glock holder
*
* Returns: errno
*/
static int inode_go_lock(struct gfs2_holder *gh)
static int inode_go_instantiate(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_inode *ip = gl->gl_object;
int error = 0;
if (!ip || (gh->gh_flags & GL_SKIP))
return 0;
if (!ip) /* no inode to populate - read it in later */
goto out;
if (test_bit(GIF_INVALID, &ip->i_flags)) {
error = gfs2_inode_refresh(ip);
if (error)
return error;
}
error = gfs2_inode_refresh(ip);
if (error)
goto out;
if (gh->gh_state != LM_ST_DEFERRED)
inode_dio_wait(&ip->i_inode);
......@@ -515,9 +511,10 @@ static int inode_go_lock(struct gfs2_holder *gh)
list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
spin_unlock(&sdp->sd_trunc_lock);
wake_up(&sdp->sd_quota_wait);
return 1;
error = 1;
}
out:
return error;
}
......@@ -740,7 +737,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_sync = inode_go_sync,
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_lock = inode_go_lock,
.go_instantiate = inode_go_instantiate,
.go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
......@@ -750,7 +747,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_sync = rgrp_go_sync,
.go_inval = rgrp_go_inval,
.go_lock = gfs2_rgrp_go_lock,
.go_instantiate = gfs2_rgrp_go_instantiate,
.go_dump = gfs2_rgrp_go_dump,
.go_type = LM_TYPE_RGRP,
.go_flags = GLOF_LVB,
......
......@@ -119,7 +119,6 @@ struct gfs2_rgrpd {
u32 rd_flags;
u32 rd_extfail_pt; /* extent failure point */
#define GFS2_RDF_CHECK 0x10000000 /* check for unlinked inodes */
#define GFS2_RDF_UPTODATE 0x20000000 /* rg is up to date */
#define GFS2_RDF_ERROR 0x40000000 /* error in rg */
#define GFS2_RDF_PREFERRED 0x80000000 /* This rgrp is preferred */
#define GFS2_RDF_MASK 0xf0000000 /* mask for internal flags */
......@@ -220,7 +219,7 @@ struct gfs2_glock_operations {
int (*go_xmote_bh)(struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
int (*go_instantiate) (struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote);
......@@ -316,6 +315,7 @@ struct gfs2_alloc_parms {
enum {
GLF_LOCK = 1,
GLF_INSTANTIATE_NEEDED = 2, /* needs instantiate */
GLF_DEMOTE = 3,
GLF_PENDING_DEMOTE = 4,
GLF_DEMOTE_IN_PROGRESS = 5,
......@@ -325,6 +325,7 @@ enum {
GLF_REPLY_PENDING = 9,
GLF_INITIAL = 10,
GLF_FROZEN = 11,
GLF_INSTANTIATE_IN_PROG = 12, /* instantiate happening now */
GLF_LRU = 13,
GLF_OBJECT = 14, /* Used only for tracing */
GLF_BLOCKING = 15,
......@@ -371,7 +372,6 @@ struct gfs2_glock {
};
enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_ALLOC_FAILED = 2,
GIF_SW_PAGED = 3,
......
......@@ -182,7 +182,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
}
glock_set_object(ip->i_gl, ip);
set_bit(GIF_INVALID, &ip->i_flags);
set_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags);
error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh);
if (unlikely(error))
goto fail;
......@@ -196,7 +196,7 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
if (type == DT_UNKNOWN) {
/* Inode glock must be locked already */
error = gfs2_inode_refresh(GFS2_I(inode));
error = gfs2_instantiate(&i_gh);
if (error)
goto fail;
} else {
......@@ -225,6 +225,10 @@ struct inode *gfs2_inode_lookup(struct super_block *sb, unsigned int type,
return inode;
fail:
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
if (io_gl)
gfs2_glock_put(io_gl);
if (gfs2_holder_initialized(&i_gh))
......@@ -727,18 +731,17 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_free_inode;
flush_delayed_work(&ip->i_gl->gl_work);
glock_set_object(ip->i_gl, ip);
error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
if (error)
goto fail_free_inode;
gfs2_cancel_delete_work(io_gl);
glock_set_object(io_gl, ip);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
if (error)
goto fail_gunlock2;
glock_set_object(ip->i_gl, ip);
error = gfs2_trans_begin(sdp, blocks, 0);
if (error)
goto fail_gunlock2;
......@@ -754,6 +757,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
if (error)
goto fail_gunlock2;
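/* Attach the inode to the iopen glock only after the holder is queued. */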
glock_set_object(io_gl, ip);
gfs2_set_iop(inode);
insert_inode_hash(inode);
......
......@@ -932,7 +932,7 @@ static int read_rindex_entry(struct gfs2_inode *ip)
goto fail;
rgd->rd_rgl = (struct gfs2_rgrp_lvb *)rgd->rd_gl->gl_lksb.sb_lvbptr;
rgd->rd_flags &= ~(GFS2_RDF_UPTODATE | GFS2_RDF_PREFERRED);
rgd->rd_flags &= ~GFS2_RDF_PREFERRED;
if (rgd->rd_data > sdp->sd_max_rg_data)
sdp->sd_max_rg_data = rgd->rd_data;
spin_lock(&sdp->sd_rindex_spin);
......@@ -1185,8 +1185,8 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
}
/**
* gfs2_rgrp_bh_get - Read in a RG's header and bitmaps
* @rgd: the struct gfs2_rgrpd describing the RG to read in
* gfs2_rgrp_go_instantiate - Read in a RG's header and bitmaps
* @gh: the glock holder representing the rgrpd to read in
*
* Read in all of a Resource Group's header and bitmap blocks.
* Caller must eventually call gfs2_rgrp_brelse() to free the bitmaps.
......@@ -1194,10 +1194,11 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
* Returns: errno
*/
static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
struct gfs2_glock *gl = rgd->rd_gl;
unsigned int length = rgd->rd_length;
struct gfs2_bitmap *bi;
unsigned int x, y;
......@@ -1225,21 +1226,18 @@ static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
}
}
if (!(rgd->rd_flags & GFS2_RDF_UPTODATE)) {
gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
rgrp_set_bitmap_flags(rgd);
rgd->rd_flags |= (GFS2_RDF_UPTODATE | GFS2_RDF_CHECK);
rgd->rd_free_clone = rgd->rd_free;
BUG_ON(rgd->rd_reserved);
/* max out the rgrp allocation failure point */
rgd->rd_extfail_pt = rgd->rd_free;
}
gfs2_rgrp_in(rgd, (rgd->rd_bits[0].bi_bh)->b_data);
rgrp_set_bitmap_flags(rgd);
rgd->rd_flags |= GFS2_RDF_CHECK;
rgd->rd_free_clone = rgd->rd_free;
GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
/* max out the rgrp allocation failure point */
rgd->rd_extfail_pt = rgd->rd_free;
if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic) {
rgd->rd_rgl->rl_unlinked = cpu_to_be32(count_unlinked(rgd));
gfs2_rgrp_ondisk2lvb(rgd->rd_rgl,
rgd->rd_bits[0].bi_bh->b_data);
}
else if (sdp->sd_args.ar_rgrplvb) {
} else if (sdp->sd_args.ar_rgrplvb) {
if (!gfs2_rgrp_lvb_valid(rgd)){
gfs2_consist_rgrpd(rgd);
error = -EIO;
......@@ -1257,19 +1255,18 @@ static int gfs2_rgrp_bh_get(struct gfs2_rgrpd *rgd)
bi->bi_bh = NULL;
gfs2_assert_warn(sdp, !bi->bi_clone);
}
return error;
}
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
static int update_rgrp_lvb(struct gfs2_rgrpd *rgd, struct gfs2_holder *gh)
{
u32 rl_flags;
if (rgd->rd_flags & GFS2_RDF_UPTODATE)
if (!test_bit(GLF_INSTANTIATE_NEEDED, &gh->gh_gl->gl_flags))
return 0;
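/* If the LVB contents look invalid, fall back to a full read from disk. */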
if (cpu_to_be32(GFS2_MAGIC) != rgd->rd_rgl->rl_magic)
return gfs2_rgrp_bh_get(rgd);
return gfs2_instantiate(gh);
rl_flags = be32_to_cpu(rgd->rd_rgl->rl_flags);
rl_flags &= ~GFS2_RDF_MASK;
......@@ -1280,7 +1277,7 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
rgd->rd_free = be32_to_cpu(rgd->rd_rgl->rl_free);
rgrp_set_bitmap_flags(rgd);
rgd->rd_free_clone = rgd->rd_free;
BUG_ON(rgd->rd_reserved);
GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved);
/* max out the rgrp allocation failure point */
rgd->rd_extfail_pt = rgd->rd_free;
rgd->rd_dinodes = be32_to_cpu(rgd->rd_rgl->rl_dinodes);
......@@ -1288,16 +1285,6 @@ static int update_rgrp_lvb(struct gfs2_rgrpd *rgd)
return 0;
}
int gfs2_rgrp_go_lock(struct gfs2_holder *gh)
{
struct gfs2_rgrpd *rgd = gh->gh_gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd;
if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb)
return 0;
return gfs2_rgrp_bh_get(rgd);
}
/**
* gfs2_rgrp_brelse - Release RG bitmaps read in with gfs2_rgrp_bh_get()
* @rgd: The resource group
......@@ -1315,6 +1302,7 @@ void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd)
bi->bi_bh = NULL;
}
}
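/* The bitmaps are gone; force a re-read before this rgrp is used again. */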
set_bit(GLF_INSTANTIATE_NEEDED, &rgd->rd_gl->gl_flags);
}
int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset,
......@@ -2113,7 +2101,8 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
gfs2_rgrp_congested(rs->rs_rgd, loops))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb) {
error = update_rgrp_lvb(rs->rs_rgd);
error = update_rgrp_lvb(rs->rs_rgd,
&ip->i_rgd_gh);
if (unlikely(error)) {
rgrp_unlock_local(rs->rs_rgd);
gfs2_glock_dq_uninit(&ip->i_rgd_gh);
......@@ -2128,8 +2117,11 @@ int gfs2_inplace_reserve(struct gfs2_inode *ip, struct gfs2_alloc_parms *ap)
(loops == 0 && target > rs->rs_rgd->rd_extfail_pt))
goto skip_rgrp;
if (sdp->sd_args.ar_rgrplvb)
gfs2_rgrp_bh_get(rs->rs_rgd);
if (sdp->sd_args.ar_rgrplvb) {
error = gfs2_instantiate(&ip->i_rgd_gh);
if (error)
goto skip_rgrp;
}
/* Get a reservation if we don't already have one */
if (!gfs2_rs_active(rs))
......@@ -2215,7 +2207,7 @@ void gfs2_inplace_release(struct gfs2_inode *ip)
struct gfs2_rgrpd *rgd = rs->rs_rgd;
spin_lock(&rgd->rd_rsspin);
BUG_ON(rgd->rd_reserved < rs->rs_reserved);
GLOCK_BUG_ON(rgd->rd_gl, rgd->rd_reserved < rs->rs_reserved);
rgd->rd_reserved -= rs->rs_reserved;
spin_unlock(&rgd->rd_rsspin);
rs->rs_reserved = 0;
......@@ -2476,9 +2468,9 @@ int gfs2_alloc_blocks(struct gfs2_inode *ip, u64 *bn, unsigned int *nblocks,
spin_unlock(&rbm.rgd->rd_rsspin);
goto rgrp_error;
}
BUG_ON(rbm.rgd->rd_reserved < *nblocks);
BUG_ON(rbm.rgd->rd_free_clone < *nblocks);
BUG_ON(rbm.rgd->rd_free < *nblocks);
GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_reserved < *nblocks);
GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free_clone < *nblocks);
GLOCK_BUG_ON(rbm.rgd->rd_gl, rbm.rgd->rd_free < *nblocks);
rbm.rgd->rd_reserved -= *nblocks;
rbm.rgd->rd_free_clone -= *nblocks;
rbm.rgd->rd_free -= *nblocks;
......@@ -2765,8 +2757,6 @@ void gfs2_rlist_free(struct gfs2_rgrp_list *rlist)
void rgrp_lock_local(struct gfs2_rgrpd *rgd)
{
BUG_ON(!gfs2_glock_is_held_excl(rgd->rd_gl) &&
!test_bit(SDF_NORECOVERY, &rgd->rd_sbd->sd_flags));
mutex_lock(&rgd->rd_mutex);
}
......
......@@ -31,7 +31,7 @@ extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_lock(struct gfs2_holder *gh);
extern int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh);
extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
......
......@@ -1244,8 +1244,8 @@ static enum dinode_demise evict_should_delete(struct inode *inode,
if (ret)
return SHOULD_NOT_DELETE_DINODE;
if (test_bit(GIF_INVALID, &ip->i_flags)) {
ret = gfs2_inode_refresh(ip);
if (test_bit(GLF_INSTANTIATE_NEEDED, &ip->i_gl->gl_flags)) {
ret = gfs2_instantiate(gh);
if (ret)
return SHOULD_NOT_DELETE_DINODE;
}
......
......@@ -197,15 +197,14 @@ TRACE_EVENT(gfs2_demote_rq,
/* Promotion/grant of a glock */
TRACE_EVENT(gfs2_promote,
TP_PROTO(const struct gfs2_holder *gh, int first),
TP_PROTO(const struct gfs2_holder *gh),
TP_ARGS(gh, first),
TP_ARGS(gh),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( u64, glnum )
__field( u32, gltype )
__field( int, first )
__field( u8, state )
),
......@@ -213,14 +212,12 @@ TRACE_EVENT(gfs2_promote,
__entry->dev = gh->gh_gl->gl_name.ln_sbd->sd_vfs->s_dev;
__entry->glnum = gh->gh_gl->gl_name.ln_number;
__entry->gltype = gh->gh_gl->gl_name.ln_type;
__entry->first = first;
__entry->state = glock_trace_state(gh->gh_state);
),
TP_printk("%u,%u glock %u:%llu promote %s %s",
TP_printk("%u,%u glock %u:%llu promote %s",
MAJOR(__entry->dev), MINOR(__entry->dev), __entry->gltype,
(unsigned long long)__entry->glnum,
__entry->first ? "first": "other",
glock_trace_name(__entry->state))
);
......
......@@ -454,6 +454,7 @@ void gfs2_consist_inode_i(struct gfs2_inode *ip,
(unsigned long long)ip->i_no_formal_ino,
(unsigned long long)ip->i_no_addr,
function, file, line);
gfs2_dump_glock(NULL, ip->i_gl, 1);
gfs2_withdraw(sdp);
}
......@@ -475,6 +476,7 @@ void gfs2_consist_rgrpd_i(struct gfs2_rgrpd *rgd,
" function = %s, file = %s, line = %u\n",
(unsigned long long)rgd->rd_addr,
function, file, line);
gfs2_dump_glock(NULL, rgd->rd_gl, 1);
gfs2_withdraw(sdp);
}
......