Commit a99bcdce authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "The highlights include:

   - Fix iscsi-target payload memory leak during
     ISCSI_FLAG_TEXT_CONTINUE (Varun Prakash)

   - Fix tcm_qla2xxx incorrect use of tcm_qla2xxx_free_cmd during ABORT
     (Pascal de Bruijn + Himanshu Madhani + nab)

   - Fix iscsi-target long-standing issue with parallel delete of a
     single network portal across multiple target instances (Gary Guo +
     nab)

   - Fix target dynamic se_node GPF during uncached shutdown regression
     (Justin Maggard + nab)"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  target: Fix node_acl demo-mode + uncached dynamic shutdown regression
  iscsi-target: Fix iscsi_np reset hung task during parallel delete
  qla2xxx: Fix incorrect tcm_qla2xxx_free_cmd use during TMR ABORT (v2)
  cxgbit: fix sg_nents calculation
  iscsi-target: fix invalid flags in text response
  iscsi-target: fix memory leak in iscsit_setup_text_cmd()
  cxgbit: add missing __kfree_skb()
  tcmu: free old string on reconfig
  tcmu: Fix possible to/from address overflow when doing the memcpy
parents 043cd07c 6f48655f
drivers/scsi/qla2xxx/tcm_qla2xxx.c

@@ -500,7 +500,6 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
-	unsigned long flags;

 	/*
 	 * Ensure that the complete FCP WRITE payload has been received.
@@ -508,17 +507,6 @@ static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 	 */
 	cmd->cmd_in_wq = 0;

-	spin_lock_irqsave(&cmd->cmd_lock, flags);
-	cmd->data_work = 1;
-	if (cmd->aborted) {
-		cmd->data_work_free = 1;
-		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
-		tcm_qla2xxx_free_cmd(cmd);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-
 	cmd->qpair->tgt_counters.qla_core_ret_ctio++;
 	if (!cmd->write_data_transferred) {
 		/*
@@ -765,31 +753,13 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
 	qlt_xmit_tm_rsp(mcmd);
 }

-#define DATA_WORK_NOT_FREE(_cmd) (_cmd->data_work && !_cmd->data_work_free)
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
-	unsigned long flags;

 	if (qlt_abort_cmd(cmd))
 		return;
-
-	spin_lock_irqsave(&cmd->cmd_lock, flags);
-	if ((cmd->state == QLA_TGT_STATE_NEW)||
-	    ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
-		DATA_WORK_NOT_FREE(cmd))) {
-		cmd->data_work_free = 1;
-		spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-		/*
-		 * cmd has not reached fw, Use this trigger to free it.
-		 */
-		tcm_qla2xxx_free_cmd(cmd);
-		return;
-	}
-	spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-	return;
 }

 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
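The two removed blocks were the second path that could hand the same command to tcm_qla2xxx_free_cmd() during a TMR ABORT; with them gone, qlt_abort_cmd() is the single owner of the abort-time free. A minimal userspace sketch of the single-owner rule under a reference count (pseudo_cmd, cmd_get() and cmd_put() are illustrative names, not the driver's API):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pseudo_cmd {
	atomic_int refs;	/* one reference per in-flight path */
};

static struct pseudo_cmd *cmd_alloc(void)
{
	struct pseudo_cmd *cmd = malloc(sizeof(*cmd));

	atomic_init(&cmd->refs, 1);
	return cmd;
}

static void cmd_get(struct pseudo_cmd *cmd)
{
	atomic_fetch_add(&cmd->refs, 1);
}

static void cmd_put(struct pseudo_cmd *cmd)
{
	/* Only the final put frees; a second free path is by construction impossible. */
	if (atomic_fetch_sub(&cmd->refs, 1) == 1) {
		printf("freeing cmd\n");
		free(cmd);
	}
}

int main(void)
{
	struct pseudo_cmd *cmd = cmd_alloc();

	cmd_get(cmd);	/* data-work path takes its own reference */
	cmd_put(cmd);	/* abort path drops its reference */
	cmd_put(cmd);	/* data-work completion drops the last one */
	return 0;
}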
drivers/target/iscsi/cxgbit/cxgbit_cm.c

@@ -1510,11 +1510,13 @@ cxgbit_pass_open_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)

 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}

 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }

 static void
@@ -1530,11 +1532,13 @@ cxgbit_close_listsrv_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)

 	if (!cnp) {
 		pr_info("%s stid %d lookup failure\n", __func__, stid);
-		return;
+		goto rel_skb;
 	}

 	cxgbit_wake_up(&cnp->com.wr_wait, __func__, rpl->status);
 	cxgbit_put_cnp(cnp);
+rel_skb:
+	__kfree_skb(skb);
 }

 static void
@@ -1819,12 +1823,16 @@ static void cxgbit_set_tcb_rpl(struct cxgbit_device *cdev, struct sk_buff *skb)
 	struct tid_info *t = lldi->tids;

 	csk = lookup_tid(t, tid);
-	if (unlikely(!csk))
+	if (unlikely(!csk)) {
 		pr_err("can't find connection for tid %u.\n", tid);
-	else
+		goto rel_skb;
+	} else {
 		cxgbit_wake_up(&csk->com.wr_wait, __func__, rpl->status);
+	}

 	cxgbit_put_csk(csk);
+rel_skb:
+	__kfree_skb(skb);
 }

 static void cxgbit_rx_data(struct cxgbit_device *cdev, struct sk_buff *skb)
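All three handlers used to drop the skb on every path, or leak it on lookup failure; the fix funnels every exit through one rel_skb label so the buffer is released exactly once. A standalone sketch of that goto-based single-release pattern (plain malloc/free stands in for skb handling; not the cxgbit code itself):

#include <stdio.h>
#include <stdlib.h>

struct buf { char data[64]; };

static struct buf *lookup(int id)
{
	return id == 0 ? NULL : malloc(sizeof(struct buf));
}

static void handle(int id, struct buf *skb)
{
	struct buf *obj = lookup(id);

	if (!obj) {
		fprintf(stderr, "lookup failure for id %d\n", id);
		goto rel_skb;	/* a bare return here would leak skb */
	}

	/* ... process obj ... */
	free(obj);
rel_skb:
	free(skb);	/* single, unconditional release point */
}

int main(void)
{
	handle(0, malloc(sizeof(struct buf)));	/* failure path still frees skb */
	handle(1, malloc(sizeof(struct buf)));	/* success path frees it too */
	return 0;
}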
drivers/target/iscsi/cxgbit/cxgbit_target.c

@@ -827,7 +827,7 @@ cxgbit_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,

 static void
 cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
-		      unsigned int nents)
+		      unsigned int nents, u32 skip)
 {
 	struct skb_seq_state st;
 	const u8 *buf;
@@ -846,7 +846,7 @@ cxgbit_skb_copy_to_sg(struct sk_buff *skb, struct scatterlist *sg,
 		}

 		consumed += sg_pcopy_from_buffer(sg, nents, (void *)buf,
-						 buf_len, consumed);
+						 buf_len, skip + consumed);
 	}
 }

@@ -912,7 +912,7 @@ cxgbit_handle_immediate_data(struct iscsi_cmd *cmd, struct iscsi_scsi_req *hdr,
 		struct scatterlist *sg = &cmd->se_cmd.t_data_sg[0];
 		u32 sg_nents = max(1UL, DIV_ROUND_UP(pdu_cb->dlen, PAGE_SIZE));

-		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg, sg_nents, 0);
 	}

 	cmd->write_data_done += pdu_cb->dlen;
@@ -1069,11 +1069,13 @@ static int cxgbit_handle_iscsi_dataout(struct cxgbit_sock *csk)
 			cmd->se_cmd.data_length);

 	if (!(pdu_cb->flags & PDUCBF_RX_DATA_DDPD)) {
+		u32 skip = data_offset % PAGE_SIZE;
+
 		sg_off = data_offset / PAGE_SIZE;
 		sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-		sg_nents = max(1UL, DIV_ROUND_UP(data_len, PAGE_SIZE));
+		sg_nents = max(1UL, DIV_ROUND_UP(skip + data_len, PAGE_SIZE));

-		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents);
+		cxgbit_skb_copy_to_sg(csk->skb, sg_start, sg_nents, skip);
 	}

 check_payload:
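The new skip parameter matters because an iSCSI Data-Out offset need not be page aligned: the copy starts skip bytes into the first scatterlist entry, so it can spill into one more entry than data_len alone implies, which is exactly the sg_nents undercount being fixed. A small standalone example of the arithmetic (the numbers are illustrative):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long data_offset = 9000;	/* not page aligned */
	unsigned long data_len = 8192;

	unsigned long sg_off = data_offset / PAGE_SIZE;	/* 2: start entry   */
	unsigned long skip = data_offset % PAGE_SIZE;	/* 808: intra-entry */

	/* Old code: DIV_ROUND_UP(data_len, PAGE_SIZE) == 2 entries.     */
	/* Fixed: the 808-byte skip pushes the copy into a third entry. */
	unsigned long sg_nents = DIV_ROUND_UP(skip + data_len, PAGE_SIZE);

	printf("sg_off=%lu skip=%lu sg_nents=%lu\n", sg_off, skip, sg_nents);
	return 0;
}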
drivers/target/iscsi/iscsi_target.c

@@ -418,6 +418,7 @@ int iscsit_reset_np_thread(
 		return 0;
 	}
 	np->np_thread_state = ISCSI_NP_THREAD_RESET;
+	atomic_inc(&np->np_reset_count);

 	if (np->np_thread) {
 		spin_unlock_bh(&np->np_thread_lock);
@@ -2167,6 +2168,7 @@ iscsit_setup_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	cmd->cmd_sn = be32_to_cpu(hdr->cmdsn);
 	cmd->exp_stat_sn = be32_to_cpu(hdr->exp_statsn);
 	cmd->data_direction = DMA_NONE;
+	kfree(cmd->text_in_ptr);
 	cmd->text_in_ptr = NULL;

 	return 0;
@@ -3487,9 +3489,9 @@ iscsit_build_text_rsp(struct iscsi_cmd *cmd, struct iscsi_conn *conn,
 		return text_length;

 	if (completed) {
-		hdr->flags |= ISCSI_FLAG_CMD_FINAL;
+		hdr->flags = ISCSI_FLAG_CMD_FINAL;
 	} else {
-		hdr->flags |= ISCSI_FLAG_TEXT_CONTINUE;
+		hdr->flags = ISCSI_FLAG_TEXT_CONTINUE;
 		cmd->read_data_done += text_length;
 		if (cmd->targ_xfer_tag == 0xFFFFFFFF)
 			cmd->targ_xfer_tag = session_get_next_ttt(conn->sess);
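The switch from |= to plain assignment matters because iscsit_build_text_rsp() rebuilds the same response header for each PDU of a long text exchange: OR-ing let ISCSI_FLAG_TEXT_CONTINUE from an earlier PDU survive into the final one, yielding an invalid F+C combination. The kfree() added in iscsit_setup_text_cmd() plugs the companion leak: a cmd retained across ISCSI_FLAG_TEXT_CONTINUE already owns a text_in_ptr buffer, and NULL-ing the pointer without freeing it dropped that allocation. A compilable sketch of the flag bug (the flag values match iscsi_proto.h):

#include <stdio.h>

#define ISCSI_FLAG_CMD_FINAL		0x80
#define ISCSI_FLAG_TEXT_CONTINUE	0x40

int main(void)
{
	unsigned char flags = 0;

	flags |= ISCSI_FLAG_TEXT_CONTINUE;	/* first PDU: C=1        */
	flags |= ISCSI_FLAG_CMD_FINAL;		/* last PDU: F=1 AND C=1 */
	printf("or-ing:     0x%02x (invalid)\n", flags);

	flags = ISCSI_FLAG_TEXT_CONTINUE;	/* first PDU             */
	flags = ISCSI_FLAG_CMD_FINAL;		/* last PDU: clean F bit */
	printf("assignment: 0x%02x (valid)\n", flags);
	return 0;
}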
drivers/target/iscsi/iscsi_target_login.c

@@ -1243,9 +1243,11 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 	flush_signals(current);

 	spin_lock_bh(&np->np_thread_lock);
-	if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+	if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
 		np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
+		spin_unlock_bh(&np->np_thread_lock);
 		complete(&np->np_restart_comp);
+		return 1;
 	} else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
 		spin_unlock_bh(&np->np_thread_lock);
 		goto exit;
@@ -1278,7 +1280,8 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
 			goto exit;
 		} else if (rc < 0) {
 			spin_lock_bh(&np->np_thread_lock);
-			if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+			if (atomic_dec_if_positive(&np->np_reset_count) >= 0) {
+				np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
 				spin_unlock_bh(&np->np_thread_lock);
 				complete(&np->np_restart_comp);
 				iscsit_put_transport(conn->conn_transport);
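Replacing the ISCSI_NP_THREAD_RESET state check with a counter means every reset requested by a parallel portal delete is queued and consumed exactly once, instead of being lost when the thread state has already moved on, which is what left iscsit_reset_np_thread() hung. A userspace sketch of atomic_dec_if_positive() semantics (dec_if_positive() is a stand-in for the kernel helper, which decrements only if the result stays non-negative and returns the would-be new value):

#include <stdatomic.h>
#include <stdio.h>

static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	/* CAS loop: on failure "old" is reloaded and the bound re-checked. */
	while (old - 1 >= 0 &&
	       !atomic_compare_exchange_weak(v, &old, old - 1))
		;
	return old - 1;	/* negative means nothing was consumed */
}

int main(void)
{
	atomic_int reset_count;

	atomic_init(&reset_count, 0);
	atomic_fetch_add(&reset_count, 1);	/* two parallel resets queued */
	atomic_fetch_add(&reset_count, 1);

	/* Login thread: each pass consumes at most one pending reset. */
	printf("%d\n", dec_if_positive(&reset_count));	/*  1: restart */
	printf("%d\n", dec_if_positive(&reset_count));	/*  0: restart */
	printf("%d\n", dec_if_positive(&reset_count));	/* -1: none    */
	return 0;
}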
drivers/target/target_core_tpg.c

@@ -364,7 +364,7 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	mutex_lock(&tpg->acl_node_mutex);
 	if (acl->dynamic_node_acl)
 		acl->dynamic_node_acl = 0;
-	list_del(&acl->acl_list);
+	list_del_init(&acl->acl_list);
 	mutex_unlock(&tpg->acl_node_mutex);

 	target_shutdown_sessions(acl);
@@ -548,7 +548,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
 	 * in transport_deregister_session().
 	 */
 	list_for_each_entry_safe(nacl, nacl_tmp, &node_list, acl_list) {
-		list_del(&nacl->acl_list);
+		list_del_init(&nacl->acl_list);

 		core_tpg_wait_for_nacl_pr_ref(nacl);
 		core_free_device_list_for_node(nacl, se_tpg);
drivers/target/target_core_transport.c

@@ -466,7 +466,7 @@ static void target_complete_nacl(struct kref *kref)
 	}

 	mutex_lock(&se_tpg->acl_node_mutex);
-	list_del(&nacl->acl_list);
+	list_del_init(&nacl->acl_list);
 	mutex_unlock(&se_tpg->acl_node_mutex);

 	core_tpg_wait_for_nacl_pr_ref(nacl);
@@ -538,7 +538,7 @@ void transport_free_session(struct se_session *se_sess)
 		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

 		if (se_nacl->dynamic_stop)
-			list_del(&se_nacl->acl_list);
+			list_del_init(&se_nacl->acl_list);
 	}
 	mutex_unlock(&se_tpg->acl_node_mutex);
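All four hunks across these two files make the same change for the same reason: list_del() leaves the entry's pointers set to LIST_POISON, so when the uncached-shutdown path and transport_free_session() race to unlink the same dynamic se_node_acl, the second unlink dereferences poison and takes the GPF named in the pull message. list_del_init() re-links the node to itself, making a repeated unlink a harmless no-op. A minimal userspace illustration (a hand-rolled two-pointer list, not the kernel headers):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_del_init(struct list_head *e)
{
	e->prev->next = e->next;	/* unlink from the list ...          */
	e->next->prev = e->prev;
	INIT_LIST_HEAD(e);		/* ... then self-link: safe to repeat */
}

int main(void)
{
	struct list_head head, node;

	INIT_LIST_HEAD(&head);
	node.next = head.next;		/* open-coded list_add(&node, &head) */
	node.prev = &head;
	head.next->prev = &node;
	head.next = &node;

	list_del_init(&node);	/* first shutdown path unlinks the acl  */
	list_del_init(&node);	/* racing second path: no-op, not a GPF */
	printf("node empty: %d\n", node.next == &node);
	return 0;
}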
drivers/target/target_core_user.c

@@ -563,8 +563,6 @@ static int scatter_data_area(struct tcmu_dev *udev,
 					block_remaining);
 			to_offset = get_block_offset_user(udev, dbi,
 					block_remaining);
-			offset = DATA_BLOCK_SIZE - block_remaining;
-			to += offset;

 			if (*iov_cnt != 0 &&
 			    to_offset == iov_tail(*iov)) {
@@ -575,7 +573,9 @@ static int scatter_data_area(struct tcmu_dev *udev,
 				(*iov)->iov_len = copy_bytes;
 			}
 			if (copy_data) {
-				memcpy(to, from + sg->length - sg_remaining,
+				offset = DATA_BLOCK_SIZE - block_remaining;
+				memcpy(to + offset,
+				       from + sg->length - sg_remaining,
 				       copy_bytes);
 				tcmu_flush_dcache_range(to, copy_bytes);
 			}
@@ -637,9 +637,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
 			copy_bytes = min_t(size_t, sg_remaining,
 					block_remaining);
 			offset = DATA_BLOCK_SIZE - block_remaining;
-			from += offset;
 			tcmu_flush_dcache_range(from, copy_bytes);
-			memcpy(to + sg->length - sg_remaining, from,
+			memcpy(to + sg->length - sg_remaining, from + offset,
 			       copy_bytes);

 			sg_remaining -= copy_bytes;
@@ -1433,6 +1432,8 @@ static int tcmu_update_uio_info(struct tcmu_dev *udev)
 	if (udev->dev_config[0])
 		snprintf(str + used, size - used, "/%s", udev->dev_config);

+	/* If the old string exists, free it */
+	kfree(info->name);
 	info->name = str;

 	return 0;
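The scatter/gather fix applies the intra-block offset at each memcpy instead of advancing the base pointer every loop pass: the old "to += offset" (and "from += offset") compounded across iterations within one block and walked past its end, the to/from address overflow named in the commit. The added kfree(info->name) is independent: it frees the previous uio name string before a reconfigure installs a new one. A standalone sketch of the pointer bug (fixed-size buffers stand in for the tcmu data ring):

#include <stdio.h>
#include <string.h>

#define BLOCK_SIZE 12

int main(void)
{
	char block[BLOCK_SIZE + 1];
	const char *src = "abcdefghijkl";
	char *to = block;	/* base of the mapped block: never advanced */
	size_t remaining = BLOCK_SIZE;

	while (remaining) {
		size_t copy_bytes = 4;
		size_t offset = BLOCK_SIZE - remaining;	/* position in block */

		/*
		 * Buggy form compounded the offset: "to += offset" ran on
		 * every pass, so pass 3 wrote at base+0+4+8 = base+12,
		 * one past the block, instead of base+8. The fix keeps
		 * "to" at the base and indexes per copy:
		 */
		memcpy(to + offset, src + offset, copy_bytes);
		remaining -= copy_bytes;
	}
	block[BLOCK_SIZE] = '\0';
	printf("%s\n", block);	/* abcdefghijkl */
	return 0;
}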
include/target/iscsi/iscsi_target_core.h

@@ -786,6 +786,7 @@ struct iscsi_np {
 	int			np_sock_type;
 	enum np_thread_state_table np_thread_state;
 	bool			enabled;
+	atomic_t		np_reset_count;
 	enum iscsi_timer_flags_table np_login_timer_flags;
 	u32			np_exports;
 	enum np_flags_table	np_flags;