Commit c42b729e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'gfs2-v5.19-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull gfs2 updates from Andreas Gruenbacher:

 - Instantiate glocks outside of the glock state engine, in the context
   of the process taking the glock. This moves unnecessary complexity
   out of the core glock code. Clean up the instantiate logic to be more
   sensible.

 - In gfs2_glock_async_wait(), cancel pending locking request upon
   failure. Make sure all glocks are left in a consistent state.

 - Various other minor cleanups and fixes.

* tag 'gfs2-v5.19-rc4-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  gfs2: List traversal in do_promote is safe
  gfs2: do_promote glock holder stealing fix
  gfs2: Use better variable name
  gfs2: Make go_instantiate take a glock
  gfs2: Add new go_held glock operation
  gfs2: Revert 'Fix "truncate in progress" hang'
  gfs2: Instantiate glocks ouside of glock state engine
  gfs2: Fix up gfs2_glock_async_wait
  gfs2: Minor gfs2_glock_nq_m cleanup
  gfs2: Fix spelling mistake in comment
  gfs2: Rewrap overlong comment in do_promote
  gfs2: Remove redundant NULL check before kfree
parents af3e9579 44627916
...@@ -2016,7 +2016,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len, ...@@ -2016,7 +2016,7 @@ static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
l_blocks++; l_blocks++;
} }
gfs2_rlist_alloc(&rlist); gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) { for (x = 0; x < rlist.rl_rgrps; x++) {
struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
......
...@@ -1066,7 +1066,6 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb, ...@@ -1066,7 +1066,6 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
gfs2_glock_dq(gh); gfs2_glock_dq(gh);
out_uninit: out_uninit:
gfs2_holder_uninit(gh); gfs2_holder_uninit(gh);
if (statfs_gh)
kfree(statfs_gh); kfree(statfs_gh);
from->count = orig_count - written; from->count = orig_count - written;
return written ? written : ret; return written ? written : ret;
......
...@@ -405,10 +405,13 @@ static void do_error(struct gfs2_glock *gl, const int ret) ...@@ -405,10 +405,13 @@ static void do_error(struct gfs2_glock *gl, const int ret)
/** /**
* demote_incompat_holders - demote incompatible demoteable holders * demote_incompat_holders - demote incompatible demoteable holders
* @gl: the glock we want to promote * @gl: the glock we want to promote
* @new_gh: the new holder to be promoted * @current_gh: the newly promoted holder
*
* We're passing the newly promoted holder in @current_gh, but actually, any of
* the strong holders would do.
*/ */
static void demote_incompat_holders(struct gfs2_glock *gl, static void demote_incompat_holders(struct gfs2_glock *gl,
struct gfs2_holder *new_gh) struct gfs2_holder *current_gh)
{ {
struct gfs2_holder *gh, *tmp; struct gfs2_holder *gh, *tmp;
...@@ -424,8 +427,10 @@ static void demote_incompat_holders(struct gfs2_glock *gl, ...@@ -424,8 +427,10 @@ static void demote_incompat_holders(struct gfs2_glock *gl,
*/ */
if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
return; return;
if (gh == current_gh)
continue;
if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) && if (test_bit(HIF_MAY_DEMOTE, &gh->gh_iflags) &&
!may_grant(gl, new_gh, gh)) { !may_grant(gl, current_gh, gh)) {
/* /*
* We should not recurse into do_promote because * We should not recurse into do_promote because
* __gfs2_glock_dq only calls handle_callback, * __gfs2_glock_dq only calls handle_callback,
...@@ -478,8 +483,7 @@ find_first_strong_holder(struct gfs2_glock *gl) ...@@ -478,8 +483,7 @@ find_first_strong_holder(struct gfs2_glock *gl)
* gfs2_instantiate - Call the glops instantiate function * gfs2_instantiate - Call the glops instantiate function
* @gh: The glock holder * @gh: The glock holder
* *
* Returns: 0 if instantiate was successful, 2 if type specific operation is * Returns: 0 if instantiate was successful, or error.
* underway, or error.
*/ */
int gfs2_instantiate(struct gfs2_holder *gh) int gfs2_instantiate(struct gfs2_holder *gh)
{ {
...@@ -489,7 +493,7 @@ int gfs2_instantiate(struct gfs2_holder *gh) ...@@ -489,7 +493,7 @@ int gfs2_instantiate(struct gfs2_holder *gh)
again: again:
if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags)) if (!test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags))
return 0; goto done;
/* /*
* Since we unlock the lockref lock, we set a flag to indicate * Since we unlock the lockref lock, we set a flag to indicate
...@@ -508,78 +512,55 @@ int gfs2_instantiate(struct gfs2_holder *gh) ...@@ -508,78 +512,55 @@ int gfs2_instantiate(struct gfs2_holder *gh)
goto again; goto again;
} }
ret = glops->go_instantiate(gh); ret = glops->go_instantiate(gl);
if (!ret) if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags); clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags); clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
if (ret)
return ret; return ret;
done:
if (glops->go_held)
return glops->go_held(gh);
return 0;
} }
/** /**
* do_promote - promote as many requests as possible on the current queue * do_promote - promote as many requests as possible on the current queue
* @gl: The glock * @gl: The glock
* *
* Returns: 1 if there is a blocked holder at the head of the list, or 2 * Returns: 1 if there is a blocked holder at the head of the list
* if a type specific operation is underway.
*/ */
static int do_promote(struct gfs2_glock *gl) static int do_promote(struct gfs2_glock *gl)
__releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock)
{ {
struct gfs2_holder *gh, *tmp, *first_gh; struct gfs2_holder *gh, *current_gh;
bool incompat_holders_demoted = false; bool incompat_holders_demoted = false;
bool lock_released;
int ret;
restart: current_gh = find_first_strong_holder(gl);
first_gh = find_first_strong_holder(gl); list_for_each_entry(gh, &gl->gl_holders, gh_list) {
list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
lock_released = false;
if (test_bit(HIF_HOLDER, &gh->gh_iflags)) if (test_bit(HIF_HOLDER, &gh->gh_iflags))
continue; continue;
if (!may_grant(gl, first_gh, gh)) { if (!may_grant(gl, current_gh, gh)) {
/* /*
* If we get here, it means we may not grant this holder for * If we get here, it means we may not grant this
* some reason. If this holder is the head of the list, it * holder for some reason. If this holder is at the
* means we have a blocked holder at the head, so return 1. * head of the list, it means we have a blocked holder
* at the head, so return 1.
*/ */
if (list_is_first(&gh->gh_list, &gl->gl_holders)) if (list_is_first(&gh->gh_list, &gl->gl_holders))
return 1; return 1;
do_error(gl, 0); do_error(gl, 0);
break; break;
} }
if (!incompat_holders_demoted) {
demote_incompat_holders(gl, first_gh);
incompat_holders_demoted = true;
first_gh = gh;
}
if (test_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags) &&
!(gh->gh_flags & GL_SKIP) && gl->gl_ops->go_instantiate) {
lock_released = true;
spin_unlock(&gl->gl_lockref.lock);
ret = gfs2_instantiate(gh);
spin_lock(&gl->gl_lockref.lock);
if (ret) {
if (ret == 1)
return 2;
gh->gh_error = ret;
list_del_init(&gh->gh_list);
trace_gfs2_glock_queue(gh, 0);
gfs2_holder_wake(gh);
goto restart;
}
}
set_bit(HIF_HOLDER, &gh->gh_iflags); set_bit(HIF_HOLDER, &gh->gh_iflags);
trace_gfs2_promote(gh); trace_gfs2_promote(gh);
gfs2_holder_wake(gh); gfs2_holder_wake(gh);
/* if (!incompat_holders_demoted) {
* If we released the gl_lockref.lock the holders list may have current_gh = gh;
* changed. For that reason, we start again at the start of demote_incompat_holders(gl, current_gh);
* the holders queue. incompat_holders_demoted = true;
*/ }
if (lock_released)
goto restart;
} }
return 0; return 0;
} }
...@@ -657,7 +638,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -657,7 +638,6 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
const struct gfs2_glock_operations *glops = gl->gl_ops; const struct gfs2_glock_operations *glops = gl->gl_ops;
struct gfs2_holder *gh; struct gfs2_holder *gh;
unsigned state = ret & LM_OUT_ST_MASK; unsigned state = ret & LM_OUT_ST_MASK;
int rv;
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
trace_gfs2_glock_state_change(gl, state); trace_gfs2_glock_state_change(gl, state);
...@@ -715,6 +695,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -715,6 +695,8 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
gfs2_demote_wake(gl); gfs2_demote_wake(gl);
if (state != LM_ST_UNLOCKED) { if (state != LM_ST_UNLOCKED) {
if (glops->go_xmote_bh) { if (glops->go_xmote_bh) {
int rv;
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
rv = glops->go_xmote_bh(gl); rv = glops->go_xmote_bh(gl);
spin_lock(&gl->gl_lockref.lock); spin_lock(&gl->gl_lockref.lock);
...@@ -723,13 +705,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret) ...@@ -723,13 +705,10 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
goto out; goto out;
} }
} }
rv = do_promote(gl); do_promote(gl);
if (rv == 2)
goto out_locked;
} }
out: out:
clear_bit(GLF_LOCK, &gl->gl_flags); clear_bit(GLF_LOCK, &gl->gl_flags);
out_locked:
spin_unlock(&gl->gl_lockref.lock); spin_unlock(&gl->gl_lockref.lock);
} }
...@@ -886,7 +865,6 @@ __releases(&gl->gl_lockref.lock) ...@@ -886,7 +865,6 @@ __releases(&gl->gl_lockref.lock)
__acquires(&gl->gl_lockref.lock) __acquires(&gl->gl_lockref.lock)
{ {
struct gfs2_holder *gh = NULL; struct gfs2_holder *gh = NULL;
int ret;
if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
return; return;
...@@ -905,18 +883,14 @@ __acquires(&gl->gl_lockref.lock) ...@@ -905,18 +883,14 @@ __acquires(&gl->gl_lockref.lock)
} else { } else {
if (test_bit(GLF_DEMOTE, &gl->gl_flags)) if (test_bit(GLF_DEMOTE, &gl->gl_flags))
gfs2_demote_wake(gl); gfs2_demote_wake(gl);
ret = do_promote(gl); if (do_promote(gl) == 0)
if (ret == 0)
goto out_unlock; goto out_unlock;
if (ret == 2)
goto out;
gh = find_first_waiter(gl); gh = find_first_waiter(gl);
gl->gl_target = gh->gh_state; gl->gl_target = gh->gh_state;
if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
do_error(gl, 0); /* Fail queued try locks */ do_error(gl, 0); /* Fail queued try locks */
} }
do_xmote(gl, gh, gl->gl_target); do_xmote(gl, gh, gl->gl_target);
out:
return; return;
out_sched: out_sched:
...@@ -1313,6 +1287,25 @@ static void gfs2_glock_update_hold_time(struct gfs2_glock *gl, ...@@ -1313,6 +1287,25 @@ static void gfs2_glock_update_hold_time(struct gfs2_glock *gl,
} }
} }
/**
* gfs2_glock_holder_ready - holder is ready and its error code can be collected
* @gh: the glock holder
*
* Called when a glock holder no longer needs to be waited for because it is
* now either held (HIF_HOLDER set; gh_error == 0), or acquiring the lock has
* failed (gh_error != 0).
*/
int gfs2_glock_holder_ready(struct gfs2_holder *gh)
{
if (gh->gh_error || (gh->gh_flags & GL_SKIP))
return gh->gh_error;
gh->gh_error = gfs2_instantiate(gh);
if (gh->gh_error)
gfs2_glock_dq(gh);
return gh->gh_error;
}
/** /**
* gfs2_glock_wait - wait on a glock acquisition * gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder * @gh: the glock holder
...@@ -1327,7 +1320,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh) ...@@ -1327,7 +1320,7 @@ int gfs2_glock_wait(struct gfs2_holder *gh)
might_sleep(); might_sleep();
wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE); wait_on_bit(&gh->gh_iflags, HIF_WAIT, TASK_UNINTERRUPTIBLE);
gfs2_glock_update_hold_time(gh->gh_gl, start_time); gfs2_glock_update_hold_time(gh->gh_gl, start_time);
return gh->gh_error; return gfs2_glock_holder_ready(gh);
} }
static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs) static int glocks_pending(unsigned int num_gh, struct gfs2_holder *ghs)
...@@ -1355,7 +1348,6 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) ...@@ -1355,7 +1348,6 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd; struct gfs2_sbd *sdp = ghs[0].gh_gl->gl_name.ln_sbd;
int i, ret = 0, timeout = 0; int i, ret = 0, timeout = 0;
unsigned long start_time = jiffies; unsigned long start_time = jiffies;
bool keep_waiting;
might_sleep(); might_sleep();
/* /*
...@@ -1365,53 +1357,33 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs) ...@@ -1365,53 +1357,33 @@ int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs)
for (i = 0; i < num_gh; i++) for (i = 0; i < num_gh; i++)
timeout += ghs[i].gh_gl->gl_hold_time << 1; timeout += ghs[i].gh_gl->gl_hold_time << 1;
wait_for_dlm:
if (!wait_event_timeout(sdp->sd_async_glock_wait, if (!wait_event_timeout(sdp->sd_async_glock_wait,
!glocks_pending(num_gh, ghs), timeout)) !glocks_pending(num_gh, ghs), timeout)) {
ret = -ESTALE; /* request timed out. */ ret = -ESTALE; /* request timed out. */
goto out;
}
/*
* If dlm granted all our requests, we need to adjust the glock
* minimum hold time values according to how long we waited.
*
* If our request timed out, we need to repeatedly release any held
* glocks we acquired thus far to allow dlm to acquire the remaining
* glocks without deadlocking. We cannot currently cancel outstanding
* glock acquisitions.
*
* The HIF_WAIT bit tells us which requests still need a response from
* dlm.
*
* If dlm sent us any errors, we return the first error we find.
*/
keep_waiting = false;
for (i = 0; i < num_gh; i++) { for (i = 0; i < num_gh; i++) {
/* Skip holders we have already dequeued below. */ struct gfs2_holder *gh = &ghs[i];
if (!gfs2_holder_queued(&ghs[i])) int ret2;
continue;
/* Skip holders with a pending DLM response. */
if (test_bit(HIF_WAIT, &ghs[i].gh_iflags)) {
keep_waiting = true;
continue;
}
if (test_bit(HIF_HOLDER, &ghs[i].gh_iflags)) { if (test_bit(HIF_HOLDER, &gh->gh_iflags)) {
if (ret == -ESTALE) gfs2_glock_update_hold_time(gh->gh_gl,
gfs2_glock_dq(&ghs[i]);
else
gfs2_glock_update_hold_time(ghs[i].gh_gl,
start_time); start_time);
} }
ret2 = gfs2_glock_holder_ready(gh);
if (!ret) if (!ret)
ret = ghs[i].gh_error; ret = ret2;
} }
if (keep_waiting) out:
goto wait_for_dlm; if (ret) {
for (i = 0; i < num_gh; i++) {
struct gfs2_holder *gh = &ghs[i];
/* gfs2_glock_dq(gh);
* At this point, we've either acquired all locks or released them all. }
*/ }
return ret; return ret;
} }
...@@ -1490,10 +1462,10 @@ __acquires(&gl->gl_lockref.lock) ...@@ -1490,10 +1462,10 @@ __acquires(&gl->gl_lockref.lock)
if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) { if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
if (test_bit(GLF_LOCK, &gl->gl_flags)) { if (test_bit(GLF_LOCK, &gl->gl_flags)) {
struct gfs2_holder *first_gh; struct gfs2_holder *current_gh;
first_gh = find_first_strong_holder(gl); current_gh = find_first_strong_holder(gl);
try_futile = !may_grant(gl, first_gh, gh); try_futile = !may_grant(gl, current_gh, gh);
} }
if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags)) if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
goto fail; goto fail;
...@@ -1779,7 +1751,7 @@ static int glock_compare(const void *arg_a, const void *arg_b) ...@@ -1779,7 +1751,7 @@ static int glock_compare(const void *arg_a, const void *arg_b)
} }
/** /**
* nq_m_sync - synchonously acquire more than one glock in deadlock free order * nq_m_sync - synchronously acquire more than one glock in deadlock free order
* @num_gh: the number of structures * @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures * @ghs: an array of struct gfs2_holder structures
* @p: placeholder for the holder structure to pass back * @p: placeholder for the holder structure to pass back
...@@ -1800,8 +1772,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, ...@@ -1800,8 +1772,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL); sort(p, num_gh, sizeof(struct gfs2_holder *), glock_compare, NULL);
for (x = 0; x < num_gh; x++) { for (x = 0; x < num_gh; x++) {
p[x]->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
error = gfs2_glock_nq(p[x]); error = gfs2_glock_nq(p[x]);
if (error) { if (error) {
while (x--) while (x--)
...@@ -1818,7 +1788,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs, ...@@ -1818,7 +1788,6 @@ static int nq_m_sync(unsigned int num_gh, struct gfs2_holder *ghs,
* @num_gh: the number of structures * @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures * @ghs: an array of struct gfs2_holder structures
* *
*
* Returns: 0 on success (all glocks acquired), * Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired) * errno on failure (no glocks acquired)
*/ */
...@@ -1833,7 +1802,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs) ...@@ -1833,7 +1802,6 @@ int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
case 0: case 0:
return 0; return 0;
case 1: case 1:
ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
return gfs2_glock_nq(ghs); return gfs2_glock_nq(ghs);
default: default:
if (num_gh <= 4) if (num_gh <= 4)
...@@ -2245,20 +2213,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp) ...@@ -2245,20 +2213,6 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp)
glock_hash_walk(dump_glock_func, sdp); glock_hash_walk(dump_glock_func, sdp);
} }
void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
{
struct gfs2_glock *gl = ip->i_gl;
int ret;
ret = gfs2_truncatei_resume(ip);
gfs2_glock_assert_withdraw(gl, ret == 0);
spin_lock(&gl->gl_lockref.lock);
clear_bit(GLF_LOCK, &gl->gl_flags);
run_queue(gl, 1);
spin_unlock(&gl->gl_lockref.lock);
}
static const char *state2str(unsigned state) static const char *state2str(unsigned state)
{ {
switch(state) { switch(state) {
......
...@@ -213,6 +213,7 @@ extern void gfs2_holder_uninit(struct gfs2_holder *gh); ...@@ -213,6 +213,7 @@ extern void gfs2_holder_uninit(struct gfs2_holder *gh);
extern int gfs2_glock_nq(struct gfs2_holder *gh); extern int gfs2_glock_nq(struct gfs2_holder *gh);
extern int gfs2_glock_poll(struct gfs2_holder *gh); extern int gfs2_glock_poll(struct gfs2_holder *gh);
extern int gfs2_instantiate(struct gfs2_holder *gh); extern int gfs2_instantiate(struct gfs2_holder *gh);
extern int gfs2_glock_holder_ready(struct gfs2_holder *gh);
extern int gfs2_glock_wait(struct gfs2_holder *gh); extern int gfs2_glock_wait(struct gfs2_holder *gh);
extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs); extern int gfs2_glock_async_wait(unsigned int num_gh, struct gfs2_holder *ghs);
extern void gfs2_glock_dq(struct gfs2_holder *gh); extern void gfs2_glock_dq(struct gfs2_holder *gh);
...@@ -273,7 +274,6 @@ extern void gfs2_cancel_delete_work(struct gfs2_glock *gl); ...@@ -273,7 +274,6 @@ extern void gfs2_cancel_delete_work(struct gfs2_glock *gl);
extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl); extern bool gfs2_delete_work_queued(const struct gfs2_glock *gl);
extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp); extern void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp); extern void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
extern void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
extern void gfs2_glock_thaw(struct gfs2_sbd *sdp); extern void gfs2_glock_thaw(struct gfs2_sbd *sdp);
extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl); extern void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
extern void gfs2_glock_free(struct gfs2_glock *gl); extern void gfs2_glock_free(struct gfs2_glock *gl);
......
...@@ -485,35 +485,33 @@ int gfs2_inode_refresh(struct gfs2_inode *ip) ...@@ -485,35 +485,33 @@ int gfs2_inode_refresh(struct gfs2_inode *ip)
* Returns: errno * Returns: errno
*/ */
static int inode_go_instantiate(struct gfs2_holder *gh) static int inode_go_instantiate(struct gfs2_glock *gl)
{
struct gfs2_inode *ip = gl->gl_object;
if (!ip) /* no inode to populate - read it in later */
return 0;
return gfs2_inode_refresh(ip);
}
static int inode_go_held(struct gfs2_holder *gh)
{ {
struct gfs2_glock *gl = gh->gh_gl; struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct gfs2_inode *ip = gl->gl_object; struct gfs2_inode *ip = gl->gl_object;
int error = 0; int error = 0;
if (!ip) /* no inode to populate - read it in later */ if (!ip) /* no inode to populate - read it in later */
goto out; return 0;
error = gfs2_inode_refresh(ip);
if (error)
goto out;
if (gh->gh_state != LM_ST_DEFERRED) if (gh->gh_state != LM_ST_DEFERRED)
inode_dio_wait(&ip->i_inode); inode_dio_wait(&ip->i_inode);
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) && if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) && (gl->gl_state == LM_ST_EXCLUSIVE) &&
(gh->gh_state == LM_ST_EXCLUSIVE)) { (gh->gh_state == LM_ST_EXCLUSIVE))
spin_lock(&sdp->sd_trunc_lock); error = gfs2_truncatei_resume(ip);
if (list_empty(&ip->i_trunc_list))
list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
spin_unlock(&sdp->sd_trunc_lock);
wake_up(&sdp->sd_quota_wait);
error = 1;
}
out:
return error; return error;
} }
...@@ -737,6 +735,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = { ...@@ -737,6 +735,7 @@ const struct gfs2_glock_operations gfs2_inode_glops = {
.go_inval = inode_go_inval, .go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok, .go_demote_ok = inode_go_demote_ok,
.go_instantiate = inode_go_instantiate, .go_instantiate = inode_go_instantiate,
.go_held = inode_go_held,
.go_dump = inode_go_dump, .go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE, .go_type = LM_TYPE_INODE,
.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB, .go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
......
...@@ -219,7 +219,8 @@ struct gfs2_glock_operations { ...@@ -219,7 +219,8 @@ struct gfs2_glock_operations {
int (*go_xmote_bh)(struct gfs2_glock *gl); int (*go_xmote_bh)(struct gfs2_glock *gl);
void (*go_inval) (struct gfs2_glock *gl, int flags); void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (const struct gfs2_glock *gl); int (*go_demote_ok) (const struct gfs2_glock *gl);
int (*go_instantiate) (struct gfs2_holder *gh); int (*go_instantiate) (struct gfs2_glock *gl);
int (*go_held)(struct gfs2_holder *gh);
void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl, void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
const char *fs_id_buf); const char *fs_id_buf);
void (*go_callback)(struct gfs2_glock *gl, bool remote); void (*go_callback)(struct gfs2_glock *gl, bool remote);
...@@ -396,7 +397,6 @@ struct gfs2_inode { ...@@ -396,7 +397,6 @@ struct gfs2_inode {
atomic_t i_sizehint; /* hint of the write size */ atomic_t i_sizehint; /* hint of the write size */
struct rw_semaphore i_rw_mutex; struct rw_semaphore i_rw_mutex;
struct list_head i_ordered; struct list_head i_ordered;
struct list_head i_trunc_list;
__be64 *i_hash_cache; __be64 *i_hash_cache;
u32 i_entries; u32 i_entries;
u32 i_diskflags; u32 i_diskflags;
...@@ -784,8 +784,6 @@ struct gfs2_sbd { ...@@ -784,8 +784,6 @@ struct gfs2_sbd {
struct mutex sd_quota_mutex; struct mutex sd_quota_mutex;
struct mutex sd_quota_sync_mutex; struct mutex sd_quota_sync_mutex;
wait_queue_head_t sd_quota_wait; wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
spinlock_t sd_trunc_lock;
unsigned int sd_quota_slots; unsigned int sd_quota_slots;
unsigned long *sd_quota_bitmap; unsigned long *sd_quota_bitmap;
......
...@@ -1058,7 +1058,7 @@ static int control_first_done(struct gfs2_sbd *sdp) ...@@ -1058,7 +1058,7 @@ static int control_first_done(struct gfs2_sbd *sdp)
/* /*
* Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC) * Expand static jid arrays if necessary (by increments of RECOVER_SIZE_INC)
* to accomodate the largest slot number. (NB dlm slot numbers start at 1, * to accommodate the largest slot number. (NB dlm slot numbers start at 1,
* gfs2 jids start at 0, so jid = slot - 1) * gfs2 jids start at 0, so jid = slot - 1)
*/ */
......
...@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo) ...@@ -38,7 +38,6 @@ static void gfs2_init_inode_once(void *foo)
inode_init_once(&ip->i_inode); inode_init_once(&ip->i_inode);
atomic_set(&ip->i_sizehint, 0); atomic_set(&ip->i_sizehint, 0);
init_rwsem(&ip->i_rw_mutex); init_rwsem(&ip->i_rw_mutex);
INIT_LIST_HEAD(&ip->i_trunc_list);
INIT_LIST_HEAD(&ip->i_ordered); INIT_LIST_HEAD(&ip->i_ordered);
ip->i_qadata = NULL; ip->i_qadata = NULL;
gfs2_holder_mark_uninitialized(&ip->i_rgd_gh); gfs2_holder_mark_uninitialized(&ip->i_rgd_gh);
......
...@@ -106,8 +106,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb) ...@@ -106,8 +106,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mutex_init(&sdp->sd_quota_mutex); mutex_init(&sdp->sd_quota_mutex);
mutex_init(&sdp->sd_quota_sync_mutex); mutex_init(&sdp->sd_quota_sync_mutex);
init_waitqueue_head(&sdp->sd_quota_wait); init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
spin_lock_init(&sdp->sd_trunc_lock);
spin_lock_init(&sdp->sd_bitmap_lock); spin_lock_init(&sdp->sd_bitmap_lock);
INIT_LIST_HEAD(&sdp->sd_sc_inodes_list); INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);
......
...@@ -1517,25 +1517,6 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg, ...@@ -1517,25 +1517,6 @@ static void quotad_check_timeo(struct gfs2_sbd *sdp, const char *msg,
} }
} }
static void quotad_check_trunc_list(struct gfs2_sbd *sdp)
{
struct gfs2_inode *ip;
while(1) {
ip = NULL;
spin_lock(&sdp->sd_trunc_lock);
if (!list_empty(&sdp->sd_trunc_list)) {
ip = list_first_entry(&sdp->sd_trunc_list,
struct gfs2_inode, i_trunc_list);
list_del_init(&ip->i_trunc_list);
}
spin_unlock(&sdp->sd_trunc_lock);
if (ip == NULL)
return;
gfs2_glock_finish_truncate(ip);
}
}
void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) { void gfs2_wake_up_statfs(struct gfs2_sbd *sdp) {
if (!sdp->sd_statfs_force_sync) { if (!sdp->sd_statfs_force_sync) {
sdp->sd_statfs_force_sync = 1; sdp->sd_statfs_force_sync = 1;
...@@ -1558,7 +1539,6 @@ int gfs2_quotad(void *data) ...@@ -1558,7 +1539,6 @@ int gfs2_quotad(void *data)
unsigned long quotad_timeo = 0; unsigned long quotad_timeo = 0;
unsigned long t = 0; unsigned long t = 0;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
int empty;
while (!kthread_should_stop()) { while (!kthread_should_stop()) {
...@@ -1579,19 +1559,13 @@ int gfs2_quotad(void *data) ...@@ -1579,19 +1559,13 @@ int gfs2_quotad(void *data)
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t, quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
&quotad_timeo, &tune->gt_quota_quantum); &quotad_timeo, &tune->gt_quota_quantum);
/* Check for & recover partially truncated inodes */
quotad_check_trunc_list(sdp);
try_to_freeze(); try_to_freeze();
bypass: bypass:
t = min(quotad_timeo, statfs_timeo); t = min(quotad_timeo, statfs_timeo);
prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE); prepare_to_wait(&sdp->sd_quota_wait, &wait, TASK_INTERRUPTIBLE);
spin_lock(&sdp->sd_trunc_lock); if (!sdp->sd_statfs_force_sync)
empty = list_empty(&sdp->sd_trunc_list);
spin_unlock(&sdp->sd_trunc_lock);
if (empty && !sdp->sd_statfs_force_sync)
t -= schedule_timeout(t); t -= schedule_timeout(t);
else else
t = 0; t = 0;
......
...@@ -1196,9 +1196,8 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd) ...@@ -1196,9 +1196,8 @@ static void rgrp_set_bitmap_flags(struct gfs2_rgrpd *rgd)
* Returns: errno * Returns: errno
*/ */
int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh) int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl)
{ {
struct gfs2_glock *gl = gh->gh_gl;
struct gfs2_rgrpd *rgd = gl->gl_object; struct gfs2_rgrpd *rgd = gl->gl_object;
struct gfs2_sbd *sdp = rgd->rd_sbd; struct gfs2_sbd *sdp = rgd->rd_sbd;
unsigned int length = rgd->rd_length; unsigned int length = rgd->rd_length;
...@@ -2720,12 +2719,15 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, ...@@ -2720,12 +2719,15 @@ void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
* gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate * gfs2_rlist_alloc - all RGs have been added to the rlist, now allocate
* and initialize an array of glock holders for them * and initialize an array of glock holders for them
* @rlist: the list of resource groups * @rlist: the list of resource groups
* @state: the state we're requesting
* @flags: the modifier flags
* *
* FIXME: Don't use NOFAIL * FIXME: Don't use NOFAIL
* *
*/ */
void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist) void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
unsigned int state, u16 flags)
{ {
unsigned int x; unsigned int x;
...@@ -2733,8 +2735,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist) ...@@ -2733,8 +2735,8 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist)
sizeof(struct gfs2_holder), sizeof(struct gfs2_holder),
GFP_NOFS | __GFP_NOFAIL); GFP_NOFS | __GFP_NOFAIL);
for (x = 0; x < rlist->rl_rgrps; x++) for (x = 0; x < rlist->rl_rgrps; x++)
gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, LM_ST_EXCLUSIVE, gfs2_holder_init(rlist->rl_rgd[x]->rd_gl, state, flags,
LM_FLAG_NODE_SCOPE, &rlist->rl_ghs[x]); &rlist->rl_ghs[x]);
} }
/** /**
......
...@@ -31,7 +31,7 @@ extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd); ...@@ -31,7 +31,7 @@ extern struct gfs2_rgrpd *gfs2_rgrpd_get_next(struct gfs2_rgrpd *rgd);
extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp); extern void gfs2_clear_rgrpd(struct gfs2_sbd *sdp);
extern int gfs2_rindex_update(struct gfs2_sbd *sdp); extern int gfs2_rindex_update(struct gfs2_sbd *sdp);
extern void gfs2_free_clones(struct gfs2_rgrpd *rgd); extern void gfs2_free_clones(struct gfs2_rgrpd *rgd);
extern int gfs2_rgrp_go_instantiate(struct gfs2_holder *gh); extern int gfs2_rgrp_go_instantiate(struct gfs2_glock *gl);
extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd); extern void gfs2_rgrp_brelse(struct gfs2_rgrpd *rgd);
extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip); extern struct gfs2_alloc *gfs2_alloc_get(struct gfs2_inode *ip);
...@@ -64,7 +64,8 @@ struct gfs2_rgrp_list { ...@@ -64,7 +64,8 @@ struct gfs2_rgrp_list {
extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist, extern void gfs2_rlist_add(struct gfs2_inode *ip, struct gfs2_rgrp_list *rlist,
u64 block); u64 block);
extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist); extern void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist,
unsigned int state, u16 flags);
extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist); extern void gfs2_rlist_free(struct gfs2_rgrp_list *rlist);
extern u64 gfs2_ri_total(struct gfs2_sbd *sdp); extern u64 gfs2_ri_total(struct gfs2_sbd *sdp);
extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd, extern void gfs2_rgrp_dump(struct seq_file *seq, struct gfs2_rgrpd *rgd,
......
...@@ -1196,7 +1196,7 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode) ...@@ -1196,7 +1196,7 @@ static bool gfs2_upgrade_iopen_glock(struct inode *inode)
gfs2_glock_dq(gh); gfs2_glock_dq(gh);
return false; return false;
} }
return true; return gfs2_glock_holder_ready(gh) == 0;
} }
/** /**
......
...@@ -1313,7 +1313,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip) ...@@ -1313,7 +1313,7 @@ static int ea_dealloc_indirect(struct gfs2_inode *ip)
else else
goto out; goto out;
gfs2_rlist_alloc(&rlist); gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE, LM_FLAG_NODE_SCOPE);
for (x = 0; x < rlist.rl_rgrps; x++) { for (x = 0; x < rlist.rl_rgrps; x++) {
rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl); rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment