Commit 8e316b9e authored by Ursula Braun, committed by Jakub Kicinski

net/smc: improve link group freeing

Usually link groups are freed delayed to enable quick connection
creation for a follow-on SMC socket. Terminated link groups are
freed faster. This patch makes sure that a fast schedule of link group
freeing is not overridden by a subsequent delayed schedule, and that
link group freeing is not rescheduled if the real freeing is already
running.
Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
parent 69318b52
...@@ -61,14 +61,21 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr) ...@@ -61,14 +61,21 @@ static void smc_lgr_schedule_free_work(struct smc_link_group *lgr)
* creation. For client use a somewhat higher removal delay time, * creation. For client use a somewhat higher removal delay time,
* otherwise there is a risk of out-of-sync link groups. * otherwise there is a risk of out-of-sync link groups.
*/ */
if (!lgr->freeing && !lgr->freefast) {
mod_delayed_work(system_wq, &lgr->free_work, mod_delayed_work(system_wq, &lgr->free_work,
(!lgr->is_smcd && lgr->role == SMC_CLNT) ? (!lgr->is_smcd && lgr->role == SMC_CLNT) ?
SMC_LGR_FREE_DELAY_CLNT : SMC_LGR_FREE_DELAY_SERV); SMC_LGR_FREE_DELAY_CLNT :
SMC_LGR_FREE_DELAY_SERV);
}
} }
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr) void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr)
{ {
mod_delayed_work(system_wq, &lgr->free_work, SMC_LGR_FREE_DELAY_FAST); if (!lgr->freeing && !lgr->freefast) {
lgr->freefast = 1;
mod_delayed_work(system_wq, &lgr->free_work,
SMC_LGR_FREE_DELAY_FAST);
}
} }
/* Register connection's alert token in our lookup structure. /* Register connection's alert token in our lookup structure.
...@@ -171,10 +178,15 @@ static void smc_lgr_free_work(struct work_struct *work) ...@@ -171,10 +178,15 @@ static void smc_lgr_free_work(struct work_struct *work)
struct smc_link_group, struct smc_link_group,
free_work); free_work);
spinlock_t *lgr_lock; spinlock_t *lgr_lock;
struct smc_link *lnk;
bool conns; bool conns;
smc_lgr_list_head(lgr, &lgr_lock); smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock); spin_lock_bh(lgr_lock);
if (lgr->freeing) {
spin_unlock_bh(lgr_lock);
return;
}
read_lock_bh(&lgr->conns_lock); read_lock_bh(&lgr->conns_lock);
conns = RB_EMPTY_ROOT(&lgr->conns_all); conns = RB_EMPTY_ROOT(&lgr->conns_all);
read_unlock_bh(&lgr->conns_lock); read_unlock_bh(&lgr->conns_lock);
...@@ -183,29 +195,27 @@ static void smc_lgr_free_work(struct work_struct *work) ...@@ -183,29 +195,27 @@ static void smc_lgr_free_work(struct work_struct *work)
return; return;
} }
list_del_init(&lgr->list); /* remove from smc_lgr_list */ list_del_init(&lgr->list); /* remove from smc_lgr_list */
spin_unlock_bh(lgr_lock);
lnk = &lgr->lnk[SMC_SINGLE_LINK];
if (!lgr->is_smcd && !lgr->terminating) { if (!lgr->is_smcd && !lgr->terminating) {
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
/* try to send del link msg, on error free lgr immediately */ /* try to send del link msg, on error free lgr immediately */
if (lnk->state == SMC_LNK_ACTIVE && if (lnk->state == SMC_LNK_ACTIVE &&
!smc_link_send_delete(lnk)) { !smc_link_send_delete(lnk)) {
/* reschedule in case we never receive a response */ /* reschedule in case we never receive a response */
smc_lgr_schedule_free_work(lgr); smc_lgr_schedule_free_work(lgr);
spin_unlock_bh(lgr_lock);
return; return;
} }
} }
lgr->freeing = 1; /* this instance does the freeing, no new schedule */
if (!delayed_work_pending(&lgr->free_work)) { spin_unlock_bh(lgr_lock);
struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK]; cancel_delayed_work(&lgr->free_work);
if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE) if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
smc_llc_link_inactive(lnk); smc_llc_link_inactive(lnk);
if (lgr->is_smcd) if (lgr->is_smcd)
smc_ism_signal_shutdown(lgr); smc_ism_signal_shutdown(lgr);
smc_lgr_free(lgr); smc_lgr_free(lgr);
}
} }
/* create a new SMC link group */ /* create a new SMC link group */
...@@ -233,6 +243,9 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini) ...@@ -233,6 +243,9 @@ static int smc_lgr_create(struct smc_sock *smc, struct smc_init_info *ini)
} }
lgr->is_smcd = ini->is_smcd; lgr->is_smcd = ini->is_smcd;
lgr->sync_err = 0; lgr->sync_err = 0;
lgr->terminating = 0;
lgr->freefast = 0;
lgr->freeing = 0;
lgr->vlan_id = ini->vlan_id; lgr->vlan_id = ini->vlan_id;
rwlock_init(&lgr->sndbufs_lock); rwlock_init(&lgr->sndbufs_lock);
rwlock_init(&lgr->rmbs_lock); rwlock_init(&lgr->rmbs_lock);
...@@ -513,7 +526,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr) ...@@ -513,7 +526,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr)
read_unlock_bh(&lgr->conns_lock); read_unlock_bh(&lgr->conns_lock);
if (!lgr->is_smcd) if (!lgr->is_smcd)
wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait); wake_up(&lgr->lnk[SMC_SINGLE_LINK].wr_reg_wait);
smc_lgr_schedule_free_work(lgr); smc_lgr_schedule_free_work_fast(lgr);
} }
/* unlink and terminate link group */ /* unlink and terminate link group */
......
...@@ -204,6 +204,8 @@ struct smc_link_group { ...@@ -204,6 +204,8 @@ struct smc_link_group {
struct delayed_work free_work; /* delayed freeing of an lgr */ struct delayed_work free_work; /* delayed freeing of an lgr */
u8 sync_err : 1; /* lgr no longer fits to peer */ u8 sync_err : 1; /* lgr no longer fits to peer */
u8 terminating : 1;/* lgr is terminating */ u8 terminating : 1;/* lgr is terminating */
u8 freefast : 1; /* free worker scheduled fast */
u8 freeing : 1; /* lgr is being freed */
bool is_smcd; /* SMC-R or SMC-D */ bool is_smcd; /* SMC-R or SMC-D */
union { union {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment