Commit b75ad4cd authored by Trond Myklebust

NFSv4.1: Ensure smooth handover of slots from one task to the next waiting

Currently, we see a lot of bouncing for the value of highest_used_slotid
due to the fact that slots are getting freed, instead of getting instantly
transmitted to the next waiting task.
Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
parent 62ae082d
......@@ -401,14 +401,15 @@ static void nfs41_sequence_free_slot(struct nfs4_sequence_res *res)
if (tbl->highest_used_slotid > tbl->target_highest_slotid)
send_new_highest_used_slotid = true;
if (nfs41_wake_and_assign_slot(tbl, res->sr_slot)) {
send_new_highest_used_slotid = false;
goto out_unlock;
}
nfs4_free_slot(tbl, res->sr_slot);
if (tbl->highest_used_slotid != NFS4_NO_SLOT)
send_new_highest_used_slotid = false;
if (!nfs4_session_draining(session)) {
if (rpc_wake_up_next(&tbl->slot_tbl_waitq) != NULL)
send_new_highest_used_slotid = false;
}
out_unlock:
spin_unlock(&tbl->slot_tbl_lock);
res->sr_slot = NULL;
if (send_new_highest_used_slotid)
......@@ -1465,6 +1466,7 @@ static void nfs4_open_prepare(struct rpc_task *task, void *calldata)
rcu_read_unlock();
out_no_action:
task->tk_action = NULL;
nfs4_sequence_done(task, &data->o_res.seq_res);
}
static void nfs4_open_done(struct rpc_task *task, void *calldata)
......@@ -2135,6 +2137,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
if (!call_close) {
/* Note: exit _without_ calling nfs4_close_done */
task->tk_action = NULL;
nfs4_sequence_done(task, &calldata->res.seq_res);
goto out;
}
......@@ -4384,6 +4387,7 @@ static void nfs4_locku_prepare(struct rpc_task *task, void *data)
if (test_bit(NFS_LOCK_INITIALIZED, &calldata->lsp->ls_flags) == 0) {
/* Note: exit _without_ running nfs4_locku_done */
task->tk_action = NULL;
nfs4_sequence_done(task, &calldata->res.seq_res);
return;
}
calldata->timestamp = jiffies;
......
......@@ -217,11 +217,65 @@ static void nfs4_destroy_slot_tables(struct nfs4_session *session)
nfs4_shrink_slot_table(&session->bc_slot_table, 0);
}
/*
 * Hand the slot @pslot directly to the waiting rpc_task @task.
 *
 * Refuses the handover (returns false) when the session is draining and
 * the request is not privileged; otherwise stamps the slot and wires it
 * into the task's sequence args/res, returning true.
 */
static bool nfs41_assign_slot(struct rpc_task *task, void *pslot)
{
	struct nfs4_slot *slot = pslot;
	struct nfs4_slot_table *tbl = slot->table;
	struct nfs4_sequence_args *seq_args = task->tk_msg.rpc_argp;
	struct nfs4_sequence_res *seq_res = task->tk_msg.rpc_resp;

	/* While draining, only privileged (state recovery) calls may run. */
	if (!seq_args->sa_privileged && nfs4_session_draining(tbl->session))
		return false;

	slot->renewal_time = jiffies;
	slot->generation = tbl->generation;
	seq_args->sa_slot = slot;
	seq_res->sr_slot = slot;
	seq_res->sr_status_flags = 0;
	seq_res->sr_status = 1;
	return true;
}
static bool __nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
struct nfs4_slot *slot)
{
if (rpc_wake_up_first(&tbl->slot_tbl_waitq, nfs41_assign_slot, slot))
return true;
return false;
}
/*
 * Offer @slot to a waiting task, unless the slot number lies above the
 * table's current max_slotid limit (such a slot must be freed instead).
 */
bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
		struct nfs4_slot *slot)
{
	return slot->slot_nr <= tbl->max_slotid &&
	       __nfs41_wake_and_assign_slot(tbl, slot);
}
static bool nfs41_try_wake_next_slot_table_entry(struct nfs4_slot_table *tbl)
{
struct nfs4_slot *slot = nfs4_alloc_slot(tbl);
if (!IS_ERR(slot)) {
bool ret = __nfs41_wake_and_assign_slot(tbl, slot);
if (ret)
return ret;
nfs4_free_slot(tbl, slot);
}
return false;
}
/*
 * Keep handing free slots to queued tasks until either the slot table
 * is exhausted or no tasks remain waiting.
 */
void nfs41_wake_slot_table(struct nfs4_slot_table *tbl)
{
	while (nfs41_try_wake_next_slot_table_entry(tbl))
		;
}
/* Update the client's idea of target_highest_slotid */
static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
u32 target_highest_slotid)
{
unsigned int max_slotid, i;
unsigned int max_slotid;
if (tbl->target_highest_slotid == target_highest_slotid)
return;
......@@ -229,9 +283,8 @@ static void nfs41_set_target_slotid_locked(struct nfs4_slot_table *tbl,
tbl->generation++;
max_slotid = min(NFS4_MAX_SLOT_TABLE - 1, tbl->target_highest_slotid);
for (i = tbl->max_slotid + 1; i <= max_slotid; i++)
rpc_wake_up_next(&tbl->slot_tbl_waitq);
tbl->max_slotid = max_slotid;
nfs41_wake_slot_table(tbl);
}
void nfs41_set_target_slotid(struct nfs4_slot_table *tbl,
......
......@@ -94,6 +94,10 @@ static inline bool nfs4_session_draining(struct nfs4_session *session)
return !!test_bit(NFS4_SESSION_DRAINING, &session->session_state);
}
bool nfs41_wake_and_assign_slot(struct nfs4_slot_table *tbl,
struct nfs4_slot *slot);
void nfs41_wake_slot_table(struct nfs4_slot_table *tbl);
/*
* Determine if sessions are in use.
*/
......
......@@ -255,17 +255,13 @@ static void nfs4_end_drain_session(struct nfs_client *clp)
{
struct nfs4_session *ses = clp->cl_session;
struct nfs4_slot_table *tbl;
unsigned int i;
if (ses == NULL)
return;
tbl = &ses->fc_slot_table;
if (test_and_clear_bit(NFS4_SESSION_DRAINING, &ses->session_state)) {
spin_lock(&tbl->slot_tbl_lock);
for (i = 0; i <= tbl->max_slotid; i++) {
if (rpc_wake_up_next(&tbl->slot_tbl_waitq) == NULL)
break;
}
nfs41_wake_slot_table(tbl);
spin_unlock(&tbl->slot_tbl_lock);
}
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment