Commit 7324f47d authored by Mike Christie, committed by Martin K. Petersen

scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path

We are only holding the lun_tg_pt_gp_lock in target_alua_state_check() to
make sure tg_pt_gp is not freed from under us while we copy the state,
delay, ID values. We can instead use RCU here to access the tg_pt_gp.

With this patch IOPs can increase up to 10% for jobs like:

  fio  --filename=/dev/sdX  --direct=1 --rw=randrw --bs=4k \
    --ioengine=libaio --iodepth=64  --numjobs=N

when there are multiple sessions (running that fio command to each /dev/sdX
or using multipath and there are over 8 paths), or more than 8 queues for
the loop or vhost with multiple threads case and numjobs > 8.

Link: https://lore.kernel.org/r/20210930020422.92578-5-michael.christie@oracle.com
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1283c0d1
...@@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) ...@@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
* this CDB was received upon to determine this value individually * this CDB was received upon to determine this value individually
* for ALUA target port group. * for ALUA target port group.
*/ */
spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock); rcu_read_lock();
tg_pt_gp = cmd->se_lun->lun_tg_pt_gp; tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
if (tg_pt_gp) if (tg_pt_gp)
buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs; buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock); rcu_read_unlock();
} }
transport_kunmap_data_sg(cmd); transport_kunmap_data_sg(cmd);
...@@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd) ...@@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
* Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp. * for the local tg_pt_gp.
*/ */
spin_lock(&l_lun->lun_tg_pt_gp_lock); rcu_read_lock();
l_tg_pt_gp = l_lun->lun_tg_pt_gp; l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
if (!l_tg_pt_gp) { if (!l_tg_pt_gp) {
spin_unlock(&l_lun->lun_tg_pt_gp_lock); rcu_read_unlock();
pr_err("Unable to access l_lun->tg_pt_gp\n"); pr_err("Unable to access l_lun->tg_pt_gp\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE; rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out; goto out;
} }
if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) { if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
spin_unlock(&l_lun->lun_tg_pt_gp_lock); rcu_read_unlock();
pr_debug("Unable to process SET_TARGET_PORT_GROUPS" pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICIT_ALUA is disabled\n"); " while TPGS_EXPLICIT_ALUA is disabled\n");
rc = TCM_UNSUPPORTED_SCSI_OPCODE; rc = TCM_UNSUPPORTED_SCSI_OPCODE;
goto out; goto out;
} }
valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states; valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
spin_unlock(&l_lun->lun_tg_pt_gp_lock); rcu_read_unlock();
ptr = &buf[4]; /* Skip over RESERVED area in header */ ptr = &buf[4]; /* Skip over RESERVED area in header */
...@@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd) ...@@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd)
" target port\n"); " target port\n");
return TCM_ALUA_OFFLINE; return TCM_ALUA_OFFLINE;
} }
rcu_read_lock();
if (!lun->lun_tg_pt_gp) tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) {
rcu_read_unlock();
return 0; return 0;
}
spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = lun->lun_tg_pt_gp;
out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state; out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs; nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id; tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
rcu_read_unlock();
spin_unlock(&lun->lun_tg_pt_gp_lock);
/* /*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first. * statement so the compiler knows explicitly to check this case first.
...@@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state( ...@@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
int trans_delay_msecs; int trans_delay_msecs;
spin_lock(&lun->lun_tg_pt_gp_lock); rcu_read_lock();
tg_pt_gp = lun->lun_tg_pt_gp; tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (!tg_pt_gp) { if (!tg_pt_gp) {
spin_unlock(&lun->lun_tg_pt_gp_lock); rcu_read_unlock();
pr_err("Unable to complete secondary state" pr_err("Unable to complete secondary state"
" transition\n"); " transition\n");
return -EINVAL; return -EINVAL;
...@@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state( ...@@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state(
"implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item), "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE"); tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
spin_unlock(&lun->lun_tg_pt_gp_lock); rcu_read_unlock();
/* /*
* Do the optional transition delay after we set the secondary * Do the optional transition delay after we set the secondary
* ALUA access state. * ALUA access state.
...@@ -1754,13 +1754,14 @@ void core_alua_free_tg_pt_gp( ...@@ -1754,13 +1754,14 @@ void core_alua_free_tg_pt_gp(
__target_attach_tg_pt_gp(lun, __target_attach_tg_pt_gp(lun,
dev->t10_alua.default_tg_pt_gp); dev->t10_alua.default_tg_pt_gp);
} else } else
lun->lun_tg_pt_gp = NULL; rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
spin_unlock(&lun->lun_tg_pt_gp_lock); spin_unlock(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock);
} }
spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
synchronize_rcu();
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp); kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
} }
...@@ -1805,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun, ...@@ -1805,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun,
assert_spin_locked(&lun->lun_tg_pt_gp_lock); assert_spin_locked(&lun->lun_tg_pt_gp_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock); spin_lock(&tg_pt_gp->tg_pt_gp_lock);
lun->lun_tg_pt_gp = tg_pt_gp; rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list); list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
tg_pt_gp->tg_pt_gp_members++; tg_pt_gp->tg_pt_gp_members++;
spin_lock(&lun->lun_deve_lock); spin_lock(&lun->lun_deve_lock);
...@@ -1822,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun, ...@@ -1822,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun,
spin_lock(&lun->lun_tg_pt_gp_lock); spin_lock(&lun->lun_tg_pt_gp_lock);
__target_attach_tg_pt_gp(lun, tg_pt_gp); __target_attach_tg_pt_gp(lun, tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock); spin_unlock(&lun->lun_tg_pt_gp_lock);
synchronize_rcu();
} }
static void __target_detach_tg_pt_gp(struct se_lun *lun, static void __target_detach_tg_pt_gp(struct se_lun *lun,
...@@ -1834,7 +1836,7 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun, ...@@ -1834,7 +1836,7 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun,
tg_pt_gp->tg_pt_gp_members--; tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock); spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
lun->lun_tg_pt_gp = NULL; rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
} }
void target_detach_tg_pt_gp(struct se_lun *lun) void target_detach_tg_pt_gp(struct se_lun *lun)
...@@ -1842,10 +1844,12 @@ void target_detach_tg_pt_gp(struct se_lun *lun) ...@@ -1842,10 +1844,12 @@ void target_detach_tg_pt_gp(struct se_lun *lun)
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
spin_lock(&lun->lun_tg_pt_gp_lock); spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = lun->lun_tg_pt_gp; tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) if (tg_pt_gp)
__target_detach_tg_pt_gp(lun, tg_pt_gp); __target_detach_tg_pt_gp(lun, tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock); spin_unlock(&lun->lun_tg_pt_gp_lock);
synchronize_rcu();
} }
ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
...@@ -1854,8 +1858,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) ...@@ -1854,8 +1858,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
struct t10_alua_tg_pt_gp *tg_pt_gp; struct t10_alua_tg_pt_gp *tg_pt_gp;
ssize_t len = 0; ssize_t len = 0;
spin_lock(&lun->lun_tg_pt_gp_lock); rcu_read_lock();
tg_pt_gp = lun->lun_tg_pt_gp; tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
if (tg_pt_gp) { if (tg_pt_gp) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item; tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:" len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
...@@ -1871,7 +1875,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page) ...@@ -1871,7 +1875,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
"Offline" : "None", "Offline" : "None",
core_alua_dump_status(lun->lun_tg_pt_secondary_stat)); core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
} }
spin_unlock(&lun->lun_tg_pt_gp_lock); rcu_read_unlock();
return len; return len;
} }
...@@ -1918,7 +1922,8 @@ ssize_t core_alua_store_tg_pt_gp_info( ...@@ -1918,7 +1922,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
} }
spin_lock(&lun->lun_tg_pt_gp_lock); spin_lock(&lun->lun_tg_pt_gp_lock);
tg_pt_gp = lun->lun_tg_pt_gp; tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
lockdep_is_held(&lun->lun_tg_pt_gp_lock));
if (tg_pt_gp) { if (tg_pt_gp) {
/* /*
* Clearing an existing tg_pt_gp association, and replacing * Clearing an existing tg_pt_gp association, and replacing
...@@ -1941,7 +1946,7 @@ ssize_t core_alua_store_tg_pt_gp_info( ...@@ -1941,7 +1946,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
dev->t10_alua.default_tg_pt_gp); dev->t10_alua.default_tg_pt_gp);
spin_unlock(&lun->lun_tg_pt_gp_lock); spin_unlock(&lun->lun_tg_pt_gp_lock);
return count; goto sync_rcu;
} }
__target_detach_tg_pt_gp(lun, tg_pt_gp); __target_detach_tg_pt_gp(lun, tg_pt_gp);
move = 1; move = 1;
...@@ -1958,6 +1963,8 @@ ssize_t core_alua_store_tg_pt_gp_info( ...@@ -1958,6 +1963,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
tg_pt_gp_new->tg_pt_gp_id); tg_pt_gp_new->tg_pt_gp_id);
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new); core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
sync_rcu:
synchronize_rcu();
return count; return count;
} }
......
...@@ -749,7 +749,7 @@ struct se_lun { ...@@ -749,7 +749,7 @@ struct se_lun {
/* ALUA target port group linkage */ /* ALUA target port group linkage */
struct list_head lun_tg_pt_gp_link; struct list_head lun_tg_pt_gp_link;
struct t10_alua_tg_pt_gp *lun_tg_pt_gp; struct t10_alua_tg_pt_gp __rcu *lun_tg_pt_gp;
spinlock_t lun_tg_pt_gp_lock; spinlock_t lun_tg_pt_gp_lock;
struct se_portal_group *lun_tpg; struct se_portal_group *lun_tpg;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment