Commit 2f2e9f2d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "This includes the long awaited series to address a set of bugs around
  active I/O remote-port LUN_RESET, as well as properly handling this
  same case with concurrent fabric driver session disconnect ->
  reconnect.

  Note this set of LUN_RESET bug-fixes has been surviving extended
  testing on both v4.5-rc1 and v3.14.y code over the last weeks, and is
  CC'ed for stable as it's something folks using multiple ESX connected
  hosts with slow backends can certainly trigger.

  The highlights also include:

   - Fix WRITE_SAME/DISCARD emulation 4k sector conversion in
     target/iblock (Mike Christie)

   - Fix TMR abort interaction and ATIO type TMR response in qla2xxx
     target (Quinn Tran + Swapnil Nagle)

   - Fix >= v3.17 stale descriptor pointer regression in qla2xxx target
     (Quinn Tran)

   - Fix >= v4.5-rc1 return regression with the new unmap_zeroes_data_store
     configfs store handler (nab)

   - Add a CPU affinity flag and convert qla2xxx to use it (Quinn + HCH +
     Bart)"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  qla2xxx: use TARGET_SCF_USE_CPUID flag to indicate CPU Affinity
  target/transport: add flag to indicate CPU Affinity is observed
  target: Fix incorrect unmap_zeroes_data_store return
  qla2xxx: Use ATIO type to send correct tmr response
  qla2xxx: Fix stale pointer access.
  target/user: Fix cast from pointer to phys_addr_t
  target: Drop legacy se_cmd->task_stop_comp + REQUEST_STOP usage
  target: Fix race with SCF_SEND_DELAYED_TAS handling
  target: Fix remote-port TMR ABORT + se_cmd fabric stop
  target: Fix TAS handling for multi-session se_node_acls
  target: Fix LUN_RESET active TMR descriptor handling
  target: Fix LUN_RESET active I/O handling for ACK_KREF
  qla2xxx: Fix TMR ABORT interaction issue between qla2xxx and TCM
  qla2xxx: Fix warning reported by static checker
  target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors
parents 4617c220 5327c7db
@@ -2204,7 +2204,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
     /* Clear outstanding commands array. */
     for (que = 0; que < ha->max_req_queues; que++) {
         req = ha->req_q_map[que];
-        if (!req)
+        if (!req || !test_bit(que, ha->req_qid_map))
             continue;
         req->out_ptr = (void *)(req->ring + req->length);
         *req->out_ptr = 0;
@@ -2221,7 +2221,7 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
     for (que = 0; que < ha->max_rsp_queues; que++) {
         rsp = ha->rsp_q_map[que];
-        if (!rsp)
+        if (!rsp || !test_bit(que, ha->rsp_qid_map))
             continue;
         rsp->in_ptr = (void *)(rsp->ring + rsp->length);
         *rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
     for (i = 1; i < ha->max_rsp_queues; i++) {
         rsp = ha->rsp_q_map[i];
-        if (rsp) {
+        if (rsp && test_bit(i, ha->rsp_qid_map)) {
             rsp->options &= ~BIT_0;
             ret = qla25xx_init_rsp_que(base_vha, rsp);
             if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
     }
     for (i = 1; i < ha->max_req_queues; i++) {
         req = ha->req_q_map[i];
-        if (req) {
+        if (req && test_bit(i, ha->req_qid_map)) {
             /* Clear outstanding commands array. */
             req->options &= ~BIT_0;
             ret = qla25xx_init_req_que(base_vha, req);
             if (ret != QLA_SUCCESS)
......
@@ -3063,9 +3063,9 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
             "MSI-X: Failed to enable support "
             "-- %d/%d\n Retry with %d vectors.\n",
             ha->msix_count, ret, ret);
-        ha->msix_count = ret;
-        ha->max_rsp_queues = ha->msix_count - 1;
     }
+    ha->msix_count = ret;
+    ha->max_rsp_queues = ha->msix_count - 1;
     ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
                 ha->msix_count, GFP_KERNEL);
     if (!ha->msix_entries) {
......
@@ -600,7 +600,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
     /* Delete request queues */
     for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
         req = ha->req_q_map[cnt];
-        if (req) {
+        if (req && test_bit(cnt, ha->req_qid_map)) {
             ret = qla25xx_delete_req_que(vha, req);
             if (ret != QLA_SUCCESS) {
                 ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@ qla25xx_delete_queues(struct scsi_qla_host *vha)
     /* Delete response queues */
     for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
         rsp = ha->rsp_q_map[cnt];
-        if (rsp) {
+        if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
             ret = qla25xx_delete_rsp_que(vha, rsp);
             if (ret != QLA_SUCCESS) {
                 ql_log(ql_log_warn, vha, 0x00eb,
......
@@ -409,6 +409,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
     int cnt;
     for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+        if (!test_bit(cnt, ha->req_qid_map))
+            continue;
+
         req = ha->req_q_map[cnt];
         qla2x00_free_req_que(ha, req);
     }
@@ -416,6 +419,9 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
     ha->req_q_map = NULL;
     for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+        if (!test_bit(cnt, ha->rsp_qid_map))
+            continue;
+
         rsp = ha->rsp_q_map[cnt];
         qla2x00_free_rsp_que(ha, rsp);
     }
......
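The hunks above all apply the same guard: ha->max_req_queues/max_rsp_queues only bound the map arrays, while the req_qid_map/rsp_qid_map bitmaps record which queue ids are actually live, so a map slot can hold a stale or NULL pointer after a queue is deleted. A minimal userspace sketch of the pattern (assumed names, not qla2xxx code):

/*
 * Minimal sketch of why the loops pair the pointer check with a qid
 * bitmap test: a slot can go stale after deletion, so only the bitmap
 * says whether the queue id is still valid.
 */
#include <stdbool.h>

#define MAX_QUEUES 32

struct queue { int id; };

struct hw {
    struct queue *q_map[MAX_QUEUES];
    unsigned long qid_map;          /* bit n set => queue n is live */
};

static bool qid_live(const struct hw *hw, int i)
{
    return hw->qid_map & (1UL << i);
}

static void for_each_live_queue(struct hw *hw, void (*fn)(struct queue *))
{
    for (int i = 0; i < MAX_QUEUES; i++) {
        /* mirrors the kernel's "if (!req || !test_bit(...)) continue;" */
        if (!hw->q_map[i] || !qid_live(hw, i))
            continue;
        fn(hw->q_map[i]);
    }
}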
@@ -105,7 +105,7 @@ static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
 static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
     int fn, void *iocb, int flags);
 static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
-    *cmd, struct atio_from_isp *atio, int ha_locked);
+    *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
     struct qla_tgt_srr_imm *imm, int ha_lock);
 static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@ void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
         qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
             0, 0, 0, 0, 0, 0);
     else {
-        if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+        if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
             qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
                 mcmd->fc_tm_rsp, false);
         else
@@ -2665,7 +2665,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
             /* no need to terminate. FW already freed exchange. */
             qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
         else
-            qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+            qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
         spin_unlock_irqrestore(&ha->hardware_lock, flags);
         return 0;
     }
@@ -3173,7 +3173,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
 }
 static void qlt_send_term_exchange(struct scsi_qla_host *vha,
-    struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+    struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+    int ul_abort)
 {
     unsigned long flags = 0;
     int rc;
@@ -3193,8 +3194,7 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
         qlt_alloc_qfull_cmd(vha, atio, 0, 0);
 done:
-    if (cmd && (!cmd->aborted ||
-        !cmd->cmd_sent_to_fw)) {
+    if (cmd && !ul_abort && !cmd->aborted) {
         if (cmd->sg_mapped)
             qlt_unmap_sg(vha, cmd);
         vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
 }
-void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
 {
     struct qla_tgt *tgt = cmd->tgt;
     struct scsi_qla_host *vha = tgt->vha;
     struct se_cmd *se_cmd = &cmd->se_cmd;
+    unsigned long flags;
     ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
         "qla_target(%d): terminating exchange for aborted cmd=%p "
         "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
         se_cmd->tag);
+    spin_lock_irqsave(&cmd->cmd_lock, flags);
+    if (cmd->aborted) {
+        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+        /*
+         * It's normal to see 2 calls in this path:
+         *  1) XFER Rdy completion + CMD_T_ABORT
+         *  2) TCM TMR - drain_state_list
+         */
+        ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+            "multiple abort. %p transport_state %x, t_state %x,"
+            " se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
+            cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
+        return EIO;
+    }
     cmd->aborted = 1;
     cmd->cmd_flags |= BIT_6;
+    spin_unlock_irqrestore(&cmd->cmd_lock, flags);
-    qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+    qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+    return 0;
 }
 EXPORT_SYMBOL(qlt_abort_cmd);
@@ -3282,6 +3299,9 @@ void qlt_free_cmd(struct qla_tgt_cmd *cmd)
     BUG_ON(cmd->cmd_in_wq);
+    if (cmd->sg_mapped)
+        qlt_unmap_sg(cmd->vha, cmd);
+
     if (!cmd->q_full)
         qlt_decr_num_pend_cmds(cmd->vha);
@@ -3399,7 +3419,7 @@ static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
         term = 1;
     if (term)
-        qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+        qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
     return term;
 }
@@ -3580,12 +3600,13 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
         case CTIO_PORT_LOGGED_OUT:
         case CTIO_PORT_UNAVAILABLE:
         {
-            int logged_out = (status & 0xFFFF);
+            int logged_out =
+                (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
             ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
                 "qla_target(%d): CTIO with %s status %x "
                 "received (state %x, se_cmd %p)\n", vha->vp_idx,
-                (logged_out == CTIO_PORT_LOGGED_OUT) ?
-                "PORT LOGGED OUT" : "PORT UNAVAILABLE",
+                logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
                 status, cmd->state, se_cmd);
             if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
         goto out_term;
     }
+    spin_lock_init(&cmd->cmd_lock);
     cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
     cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
     cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
      */
    cmd->cmd_flags |= BIT_2;
    spin_lock_irqsave(&ha->hardware_lock, flags);
-   qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+   qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
    qlt_decr_num_pend_cmds(vha);
    percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
 out_term:
     spin_lock_irqsave(&ha->hardware_lock, flags);
-    qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+    qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     kfree(op);
@@ -3982,7 +4004,8 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
     cmd->cmd_in_wq = 1;
     cmd->cmd_flags |= BIT_0;
-    cmd->se_cmd.cpuid = -1;
+    cmd->se_cmd.cpuid = ha->msix_count ?
+        ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
     spin_lock(&vha->cmd_list_lock);
     list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
     INIT_WORK(&cmd->work, qlt_do_work);
     if (ha->msix_count) {
-        cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
         if (cmd->atio.u.isp24.fcp_cmnd.rddata)
             queue_work_on(smp_processor_id(), qla_tgt_wq,
                 &cmd->work);
@@ -4771,7 +4793,7 @@ static void qlt_handle_srr(struct scsi_qla_host *vha,
             dump_stack();
         } else {
             cmd->cmd_flags |= BIT_9;
-            qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+            qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
         }
         spin_unlock_irqrestore(&ha->hardware_lock, flags);
     }
@@ -4950,7 +4972,7 @@ static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
                 sctio, sctio->srr_id);
             list_del(&sctio->srr_list_entry);
             qlt_send_term_exchange(vha, sctio->cmd,
-                &sctio->cmd->atio, 1);
+                &sctio->cmd->atio, 1, 0);
             kfree(sctio);
         }
     }
@@ -5123,7 +5145,7 @@ static int __qlt_send_busy(struct scsi_qla_host *vha,
         atio->u.isp24.fcp_hdr.s_id);
     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
     if (!sess) {
-        qlt_send_term_exchange(vha, NULL, atio, 1);
+        qlt_send_term_exchange(vha, NULL, atio, 1, 0);
         return 0;
     }
     /* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@ static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
             qlt_send_busy(vha, atio, SAM_STAT_BUSY);
 #else
-            qlt_send_term_exchange(vha, NULL, atio, 1);
+            qlt_send_term_exchange(vha, NULL, atio, 1, 0);
 #endif
             if (!ha_locked)
@@ -5523,7 +5545,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
             qlt_send_busy(vha, atio, 0);
 #else
-            qlt_send_term_exchange(vha, NULL, atio, 1);
+            qlt_send_term_exchange(vha, NULL, atio, 1, 0);
 #endif
         } else {
             if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@ static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
                     "command to target, sending TERM "
                     "EXCHANGE for rsp\n");
                 qlt_send_term_exchange(vha, NULL,
-                    atio, 1);
+                    atio, 1, 0);
             } else {
                 ql_dbg(ql_dbg_tgt, vha, 0xe060,
                     "qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
     return;
 out_term:
-    qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
+    qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
     if (sess)
         ha->tgt.tgt_ops->put_sess(sess);
     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
......
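qlt_abort_cmd() above now serializes on cmd->cmd_lock and returns EIO when the command is already marked aborted, since both the XFER-RDY completion path and the TCM TMR drain can request termination of the same command. A minimal pthread sketch of that "first caller wins" guard (assumed names, not driver code):

/*
 * The first caller marks the command aborted and owns the termination;
 * any later caller backs off with EIO.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

struct cmd {
    pthread_mutex_t lock;
    bool aborted;
};

/* Returns 0 for the caller that wins the abort, EIO for everyone else. */
static int abort_cmd_once(struct cmd *c)
{
    pthread_mutex_lock(&c->lock);
    if (c->aborted) {
        pthread_mutex_unlock(&c->lock);
        return EIO;     /* already being torn down elsewhere */
    }
    c->aborted = true;
    pthread_mutex_unlock(&c->lock);

    /* ... the winner terminates the exchange exactly once ... */
    return 0;
}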
@@ -943,6 +943,36 @@ struct qla_tgt_sess {
     qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
 };
+typedef enum {
+    /*
+     * BIT_0 - Atio Arrival / schedule to work
+     * BIT_1 - qlt_do_work
+     * BIT_2 - qlt_do_work failed
+     * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+     * BIT_4 - read respond/tcm_qla2xxx_queue_data_in
+     * BIT_5 - status respond / tcm_qla2xxx_queue_status
+     * BIT_6 - tcm request to abort/Term exchange.
+     *    pre_xmit_response->qlt_send_term_exchange
+     * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+     * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+     * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
+     * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+     * BIT_12 - good completion - qlt_ctio_do_completion --> free_cmd
+     * BIT_13 - Bad completion -
+     *    qlt_ctio_do_completion --> qlt_term_ctio_exchange
+     * BIT_14 - Back end data received/sent.
+     * BIT_15 - SRR prepare ctio
+     * BIT_16 - complete free
+     * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+     * BIT_18 - completion w/abort status
+     * BIT_19 - completion w/unknown status
+     * BIT_20 - tcm_qla2xxx_free_cmd
+     */
+    CMD_FLAG_DATA_WORK = BIT_11,
+    CMD_FLAG_DATA_WORK_FREE = BIT_21,
+} cmd_flags_t;
 struct qla_tgt_cmd {
     struct se_cmd se_cmd;
     struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@ struct qla_tgt_cmd {
     /* Sense buffer that will be mapped into outgoing status */
     unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+    spinlock_t cmd_lock;
     /* to save extra sess dereferences */
     unsigned int conf_compl_supported:1;
     unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@ struct qla_tgt_cmd {
     uint64_t jiffies_at_alloc;
     uint64_t jiffies_at_free;
-    /* BIT_0 - Atio Arrival / schedule to work
-     * BIT_1 - qlt_do_work
-     * BIT_2 - qlt_do work failed
-     * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
-     * BIT_4 - read respond/tcm_qla2xx_queue_data_in
-     * BIT_5 - status respond / tcm_qla2xx_queue_status
-     * BIT_6 - tcm request to abort/Term exchange.
-     *    pre_xmit_response->qlt_send_term_exchange
-     * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
-     * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
-     * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
-     * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
-     * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
-     * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
-     * BIT_13 - Bad completion -
-     *    qlt_ctio_do_completion --> qlt_term_ctio_exchange
-     * BIT_14 - Back end data received/sent.
-     * BIT_15 - SRR prepare ctio
-     * BIT_16 - complete free
-     * BIT_17 - flush - qlt_abort_cmd_on_host_reset
-     * BIT_18 - completion w/abort status
-     * BIT_19 - completion w/unknown status
-     */
-    uint32_t cmd_flags;
+    cmd_flags_t cmd_flags;
 };
 struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@ static inline void sid_to_portid(const uint8_t *s_id, port_id_t *p)
 extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
 extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
 extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
 extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
 extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
......
@@ -395,6 +395,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
     if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
         for (i = 0; i < vha->hw->max_req_queues; i++) {
             struct req_que *req = vha->hw->req_q_map[i];
+
+            if (!test_bit(i, vha->hw->req_qid_map))
+                continue;
+
             if (req || !buf) {
                 length = req ?
                     req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
     } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
         for (i = 0; i < vha->hw->max_rsp_queues; i++) {
             struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+            if (!test_bit(i, vha->hw->rsp_qid_map))
+                continue;
+
             if (rsp || !buf) {
                 length = rsp ?
                     rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
     if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
         for (i = 0; i < vha->hw->max_req_queues; i++) {
             struct req_que *req = vha->hw->req_q_map[i];
+
+            if (!test_bit(i, vha->hw->req_qid_map))
+                continue;
+
             if (req || !buf) {
                 qla27xx_insert16(i, buf, len);
                 qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
     } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
         for (i = 0; i < vha->hw->max_rsp_queues; i++) {
             struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+            if (!test_bit(i, vha->hw->rsp_qid_map))
+                continue;
+
             if (rsp || !buf) {
                 qla27xx_insert16(i, buf, len);
                 qla27xx_insert16(1, buf, len);
......
@@ -298,6 +298,10 @@ static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 {
     cmd->vha->tgt_counters.core_qla_free_cmd++;
     cmd->cmd_in_wq = 1;
+
+    BUG_ON(cmd->cmd_flags & BIT_20);
+    cmd->cmd_flags |= BIT_20;
+
     INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
     queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
 }
@@ -374,6 +378,20 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
 {
     struct qla_tgt_cmd *cmd = container_of(se_cmd,
                 struct qla_tgt_cmd, se_cmd);
+
+    if (cmd->aborted) {
+        /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+         * can get ahead of this cmd and has already kick-started
+         * the free.
+         */
+        pr_debug("write_pending aborted cmd[%p] refcount %d "
+            "transport_state %x, t_state %x, se_cmd_flags %x\n",
+            cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+            cmd->se_cmd.transport_state,
+            cmd->se_cmd.t_state,
+            cmd->se_cmd.se_cmd_flags);
+        return 0;
+    }
     cmd->cmd_flags |= BIT_3;
     cmd->bufflen = se_cmd->data_length;
     cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
         se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
         spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
         wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
-                        3 * HZ);
+                        50);
         return 0;
     }
     spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
     if (bidi)
         flags |= TARGET_SCF_BIDI_OP;
+    if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+        flags |= TARGET_SCF_USE_CPUID;
+
     sess = cmd->sess;
     if (!sess) {
         pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@ static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
 static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
 {
     struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+    unsigned long flags;
+
     /*
      * Ensure that the complete FCP WRITE payload has been received.
      * Otherwise return an exception via CHECK_CONDITION status.
      */
     cmd->cmd_in_wq = 0;
-    cmd->cmd_flags |= BIT_11;
+
+    spin_lock_irqsave(&cmd->cmd_lock, flags);
+    cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+    if (cmd->aborted) {
+        cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+        tcm_qla2xxx_free_cmd(cmd);
+        return;
+    }
+    spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
     cmd->vha->tgt_counters.qla_core_ret_ctio++;
     if (!cmd->write_data_transferred) {
         /*
@@ -546,6 +579,20 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
     struct qla_tgt_cmd *cmd = container_of(se_cmd,
                 struct qla_tgt_cmd, se_cmd);
+
+    if (cmd->aborted) {
+        /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+         * can get ahead of this cmd and has already kick-started
+         * the free.
+         */
+        pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+            "transport_state %x, t_state %x, se_cmd_flags %x\n",
+            cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+            cmd->se_cmd.transport_state,
+            cmd->se_cmd.t_state,
+            cmd->se_cmd.se_cmd_flags);
+        return 0;
+    }
     cmd->cmd_flags |= BIT_4;
     cmd->bufflen = se_cmd->data_length;
     cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@ static void tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
     qlt_xmit_tm_rsp(mcmd);
 }
+#define DATA_WORK_NOT_FREE(_flags) \
+    ((_flags & (CMD_FLAG_DATA_WORK|CMD_FLAG_DATA_WORK_FREE)) == \
+     CMD_FLAG_DATA_WORK)
+
 static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
 {
     struct qla_tgt_cmd *cmd = container_of(se_cmd,
                 struct qla_tgt_cmd, se_cmd);
-    qlt_abort_cmd(cmd);
+    unsigned long flags;
+
+    if (qlt_abort_cmd(cmd))
+        return;
+
+    spin_lock_irqsave(&cmd->cmd_lock, flags);
+    if ((cmd->state == QLA_TGT_STATE_NEW) ||
+        ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+         DATA_WORK_NOT_FREE(cmd->cmd_flags))) {
+        cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+        spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+        /* Cmd has not reached firmware.
+         * Use this trigger to free it. */
+        tcm_qla2xxx_free_cmd(cmd);
+        return;
+    }
+    spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+    return;
 }
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
......
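The CMD_FLAG_DATA_WORK/CMD_FLAG_DATA_WORK_FREE pair above resolves a race between the WRITE data work handler and tcm_qla2xxx_aborted_task(): whichever side observes the other's flag under cmd_lock claims the single free. A simplified pthread sketch of that handoff (assumed names, not driver code):

/*
 * Exactly one side calls free_cmd(): the data worker frees if it finds
 * the command already aborted; the abort path frees only if data work
 * ran but nobody has claimed the free yet.
 */
#include <pthread.h>
#include <stdbool.h>

enum { DATA_WORK = 1 << 0, DATA_WORK_FREE = 1 << 1 };

struct cmd {
    pthread_mutex_t lock;
    unsigned flags;
    bool aborted;
};

static void free_cmd(struct cmd *c) { (void)c; /* release once */ }

static void data_work(struct cmd *c)        /* handle_data_work path */
{
    pthread_mutex_lock(&c->lock);
    c->flags |= DATA_WORK;
    if (c->aborted) {                       /* abort won the race */
        c->flags |= DATA_WORK_FREE;
        pthread_mutex_unlock(&c->lock);
        free_cmd(c);
        return;
    }
    pthread_mutex_unlock(&c->lock);
    /* ... normal completion continues ... */
}

static void aborted_task(struct cmd *c)     /* abort path */
{
    pthread_mutex_lock(&c->lock);
    bool pending_not_freed =
        (c->flags & (DATA_WORK | DATA_WORK_FREE)) == DATA_WORK;
    if (pending_not_freed) {
        c->flags |= DATA_WORK_FREE;
        pthread_mutex_unlock(&c->lock);
        free_cmd(c);
        return;
    }
    pthread_mutex_unlock(&c->lock);
}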
@@ -898,7 +898,7 @@ static ssize_t unmap_zeroes_data_store(struct config_item *item,
     da->unmap_zeroes_data = flag;
     pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
          da->da_dev, flag);
-    return 0;
+    return count;
 }
 /*
......
@@ -828,6 +828,50 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
     return dev;
 }
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA and we need to set TPE=1
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+                       struct request_queue *q, int block_size)
+{
+    if (!blk_queue_discard(q))
+        return false;
+
+    attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+                                block_size;
+    /*
+     * Currently hardcoded to 1 in Linux/SCSI code..
+     */
+    attrib->max_unmap_block_desc_count = 1;
+    attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+    attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+                                block_size;
+    attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+    return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+    switch (dev->dev_attrib.block_size) {
+    case 4096:
+        return lb << 3;
+    case 2048:
+        return lb << 2;
+    case 1024:
+        return lb << 1;
+    default:
+        return lb;
+    }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
 int target_configure_device(struct se_device *dev)
 {
     struct se_hba *hba = dev->se_hba;
......
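target_to_linux_sector() above is a pure shift: the block layer always counts 512-byte sectors, so a 4096-byte logical block covers 8 of them. A standalone worked example (hypothetical helper mirroring the function):

/*
 * With a 4 KiB logical block size, LBA 100 maps to 512-byte sector 800,
 * because 4096 / 512 = 8 = 1 << 3.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t to_linux_sector(uint32_t block_size, uint64_t lba)
{
    switch (block_size) {
    case 4096: return lba << 3;   /* 8 sectors per block */
    case 2048: return lba << 2;   /* 4 sectors per block */
    case 1024: return lba << 1;   /* 2 sectors per block */
    default:   return lba;        /* 512: already in block-layer units */
    }
}

int main(void)
{
    assert(to_linux_sector(4096, 100) == 800);
    assert(to_linux_sector(512, 100) == 100);
    return 0;
}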
@@ -160,25 +160,11 @@ static int fd_configure_device(struct se_device *dev)
             " block_device blocks: %llu logical_block_size: %d\n",
             dev_size, div_u64(dev_size, fd_dev->fd_block_size),
             fd_dev->fd_block_size);
-        /*
-         * Check if the underlying struct block_device request_queue supports
-         * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-         * in ATA and we need to set TPE=1
-         */
-        if (blk_queue_discard(q)) {
-            dev->dev_attrib.max_unmap_lba_count =
-                q->limits.max_discard_sectors;
-            /*
-             * Currently hardcoded to 1 in Linux/SCSI code..
-             */
-            dev->dev_attrib.max_unmap_block_desc_count = 1;
-            dev->dev_attrib.unmap_granularity =
-                q->limits.discard_granularity >> 9;
-            dev->dev_attrib.unmap_granularity_alignment =
-                q->limits.discard_alignment;
-
+        if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+                              fd_dev->fd_block_size))
             pr_debug("IFILE: BLOCK Discard support available,"
                 " disabled by default\n");
-        }
     /*
      * Enable write same emulation for IBLOCK and use 0xFFFF as
      * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
     if (S_ISBLK(inode->i_mode)) {
         /* The backend is block device, use discard */
         struct block_device *bdev = inode->i_bdev;
+        struct se_device *dev = cmd->se_dev;
-        ret = blkdev_issue_discard(bdev, lba,
-                nolb, GFP_KERNEL, 0);
+        ret = blkdev_issue_discard(bdev,
+                target_to_linux_sector(dev, lba),
+                target_to_linux_sector(dev, nolb),
+                GFP_KERNEL, 0);
         if (ret < 0) {
             pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
                 ret);
......
@@ -121,29 +121,11 @@ static int iblock_configure_device(struct se_device *dev)
     dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
     dev->dev_attrib.hw_queue_depth = q->nr_requests;
-    /*
-     * Check if the underlying struct block_device request_queue supports
-     * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
-     * in ATA and we need to set TPE=1
-     */
-    if (blk_queue_discard(q)) {
-        dev->dev_attrib.max_unmap_lba_count =
-            q->limits.max_discard_sectors;
-        /*
-         * Currently hardcoded to 1 in Linux/SCSI code..
-         */
-        dev->dev_attrib.max_unmap_block_desc_count = 1;
-        dev->dev_attrib.unmap_granularity =
-            q->limits.discard_granularity >> 9;
-        dev->dev_attrib.unmap_granularity_alignment =
-            q->limits.discard_alignment;
-        dev->dev_attrib.unmap_zeroes_data =
-            q->limits.discard_zeroes_data;
-
+    if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+                          dev->dev_attrib.hw_block_size))
         pr_debug("IBLOCK: BLOCK Discard support available,"
             " disabled by default\n");
-    }
     /*
      * Enable write same emulation for IBLOCK and use 0xFFFF as
      * the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@ static sense_reason_t
 iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 {
     struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+    struct se_device *dev = cmd->se_dev;
     int ret;
-    ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+    ret = blkdev_issue_discard(bdev,
+                   target_to_linux_sector(dev, lba),
+                   target_to_linux_sector(dev, nolb),
+                   GFP_KERNEL, 0);
     if (ret < 0) {
         pr_err("blkdev_issue_discard() failed: %d\n", ret);
         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@ iblock_execute_write_same(struct se_cmd *cmd)
     struct scatterlist *sg;
     struct bio *bio;
     struct bio_list list;
-    sector_t block_lba = cmd->t_task_lba;
-    sector_t sectors = sbc_get_write_same_sectors(cmd);
+    struct se_device *dev = cmd->se_dev;
+    sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+    sector_t sectors = target_to_linux_sector(dev,
+                sbc_get_write_same_sectors(cmd));
     if (cmd->prot_op) {
         pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
           enum dma_data_direction data_direction)
 {
     struct se_device *dev = cmd->se_dev;
+    sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
     struct iblock_req *ibr;
     struct bio *bio, *bio_start;
     struct bio_list list;
     struct scatterlist *sg;
     u32 sg_num = sgl_nents;
-    sector_t block_lba;
     unsigned bio_cnt;
     int rw = 0;
     int i;
@@ -679,24 +667,6 @@ iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
         rw = READ;
     }
-    /*
-     * Convert the blocksize advertised to the initiator to the 512 byte
-     * units unconditionally used by the Linux block layer.
-     */
-    if (dev->dev_attrib.block_size == 4096)
-        block_lba = (cmd->t_task_lba << 3);
-    else if (dev->dev_attrib.block_size == 2048)
-        block_lba = (cmd->t_task_lba << 2);
-    else if (dev->dev_attrib.block_size == 1024)
-        block_lba = (cmd->t_task_lba << 1);
-    else if (dev->dev_attrib.block_size == 512)
-        block_lba = cmd->t_task_lba;
-    else {
-        pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-               " %u\n", dev->dev_attrib.block_size);
-        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-    }
-
     ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
     if (!ibr)
         goto fail;
......
@@ -141,7 +141,6 @@ void transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
 int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
 void transport_clear_lun_ref(struct se_lun *);
 void transport_send_task_abort(struct se_cmd *);
 sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
......
@@ -68,23 +68,25 @@ void core_tmr_release_req(struct se_tmr_req *tmr)
     if (dev) {
         spin_lock_irqsave(&dev->se_tmr_lock, flags);
-        list_del(&tmr->tmr_list);
+        list_del_init(&tmr->tmr_list);
         spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
     }
     kfree(tmr);
 }
-static void core_tmr_handle_tas_abort(
-    struct se_node_acl *tmr_nacl,
-    struct se_cmd *cmd,
-    int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
 {
-    bool remove = true;
+    unsigned long flags;
+    bool remove = true, send_tas;
     /*
      * TASK ABORTED status (TAS) bit support
      */
-    if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+    spin_lock_irqsave(&cmd->t_state_lock, flags);
+    send_tas = (cmd->transport_state & CMD_T_TAS);
+    spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+    if (send_tas) {
         remove = false;
         transport_send_task_abort(cmd);
     }
@@ -107,6 +109,46 @@ static int target_check_cdb_and_preempt(struct list_head *list,
     return 1;
 }
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+                    struct se_session *tmr_sess, int tas)
+{
+    struct se_session *sess = se_cmd->se_sess;
+
+    assert_spin_locked(&sess->sess_cmd_lock);
+    WARN_ON_ONCE(!irqs_disabled());
+    /*
+     * If command already reached CMD_T_COMPLETE state within
+     * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown,
+     * this se_cmd has been passed to fabric driver and will
+     * not be aborted.
+     *
+     * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+     * ABORT_TASK + LUN_RESET for CMD_T_ABORTED processing as
+     * long as se_cmd->cmd_kref is still active unless zero.
+     */
+    spin_lock(&se_cmd->t_state_lock);
+    if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+        pr_debug("Attempted to abort io tag: %llu already complete or"
+            " fabric stop, skipping\n", se_cmd->tag);
+        spin_unlock(&se_cmd->t_state_lock);
+        return false;
+    }
+    if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+        pr_debug("Attempted to abort io tag: %llu already shutdown,"
+            " skipping\n", se_cmd->tag);
+        spin_unlock(&se_cmd->t_state_lock);
+        return false;
+    }
+    se_cmd->transport_state |= CMD_T_ABORTED;
+
+    if ((tmr_sess != se_cmd->se_sess) && tas)
+        se_cmd->transport_state |= CMD_T_TAS;
+
+    spin_unlock(&se_cmd->t_state_lock);
+
+    return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
 void core_tmr_abort_task(
     struct se_device *dev,
     struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@ void core_tmr_abort_task(
         if (tmr->ref_task_tag != ref_tag)
             continue;
-        if (!kref_get_unless_zero(&se_cmd->cmd_kref))
-            continue;
-
         printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
             se_cmd->se_tfo->get_fabric_name(), ref_tag);
-        spin_lock(&se_cmd->t_state_lock);
-        if (se_cmd->transport_state & CMD_T_COMPLETE) {
-            printk("ABORT_TASK: ref_tag: %llu already complete,"
-                " skipping\n", ref_tag);
-            spin_unlock(&se_cmd->t_state_lock);
+        if (!__target_check_io_state(se_cmd, se_sess, 0)) {
             spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
             target_put_sess_cmd(se_cmd);
             goto out;
         }
-        se_cmd->transport_state |= CMD_T_ABORTED;
-        spin_unlock(&se_cmd->t_state_lock);
         list_del_init(&se_cmd->se_cmd_list);
         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
         cancel_work_sync(&se_cmd->work);
         transport_wait_for_tasks(se_cmd);
-        target_put_sess_cmd(se_cmd);
         transport_cmd_finish_abort(se_cmd, true);
+        target_put_sess_cmd(se_cmd);
         printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
                 " ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@ static void core_tmr_drain_tmr_list(
     struct list_head *preempt_and_abort_list)
 {
     LIST_HEAD(drain_tmr_list);
+    struct se_session *sess;
     struct se_tmr_req *tmr_p, *tmr_pp;
     struct se_cmd *cmd;
     unsigned long flags;
+    bool rc;
     /*
      * Release all pending and outgoing TMRs aside from the received
      * LUN_RESET tmr..
@@ -206,17 +238,39 @@ static void core_tmr_drain_tmr_list(
         if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
             continue;
+        sess = cmd->se_sess;
+        if (WARN_ON_ONCE(!sess))
+            continue;
+
+        spin_lock(&sess->sess_cmd_lock);
         spin_lock(&cmd->t_state_lock);
-        if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+        if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+            (cmd->transport_state & CMD_T_FABRIC_STOP)) {
             spin_unlock(&cmd->t_state_lock);
+            spin_unlock(&sess->sess_cmd_lock);
             continue;
         }
         if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
             spin_unlock(&cmd->t_state_lock);
+            spin_unlock(&sess->sess_cmd_lock);
             continue;
         }
+        if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+            spin_unlock(&cmd->t_state_lock);
+            spin_unlock(&sess->sess_cmd_lock);
+            continue;
+        }
+        cmd->transport_state |= CMD_T_ABORTED;
         spin_unlock(&cmd->t_state_lock);
+        rc = kref_get_unless_zero(&cmd->cmd_kref);
+        if (!rc) {
+            printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+            spin_unlock(&sess->sess_cmd_lock);
+            continue;
+        }
+        spin_unlock(&sess->sess_cmd_lock);
+
         list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
     }
     spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@ static void core_tmr_drain_tmr_list(
             (preempt_and_abort_list) ? "Preempt" : "", tmr_p,
             tmr_p->function, tmr_p->response, cmd->t_state);
+        cancel_work_sync(&cmd->work);
+        transport_wait_for_tasks(cmd);
+
         transport_cmd_finish_abort(cmd, 1);
+        target_put_sess_cmd(cmd);
     }
 }
 static void core_tmr_drain_state_list(
     struct se_device *dev,
     struct se_cmd *prout_cmd,
-    struct se_node_acl *tmr_nacl,
+    struct se_session *tmr_sess,
     int tas,
     struct list_head *preempt_and_abort_list)
 {
     LIST_HEAD(drain_task_list);
+    struct se_session *sess;
     struct se_cmd *cmd, *next;
     unsigned long flags;
+    int rc;
     /*
      * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@ static void core_tmr_drain_state_list(
         if (prout_cmd == cmd)
             continue;
+        sess = cmd->se_sess;
+        if (WARN_ON_ONCE(!sess))
+            continue;
+
+        spin_lock(&sess->sess_cmd_lock);
+        rc = __target_check_io_state(cmd, tmr_sess, tas);
+        spin_unlock(&sess->sess_cmd_lock);
+        if (!rc)
+            continue;
+
         list_move_tail(&cmd->state_list, &drain_task_list);
         cmd->state_active = false;
     }
@@ -289,7 +359,7 @@ static void core_tmr_drain_state_list(
     while (!list_empty(&drain_task_list)) {
         cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
-        list_del(&cmd->state_list);
+        list_del_init(&cmd->state_list);
         pr_debug("LUN_RESET: %s cmd: %p"
             " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@ static void core_tmr_drain_state_list(
          * loop above, but we do it down here given that
          * cancel_work_sync may block.
          */
-        if (cmd->t_state == TRANSPORT_COMPLETE)
-            cancel_work_sync(&cmd->work);
-
-        spin_lock_irqsave(&cmd->t_state_lock, flags);
-        target_stop_cmd(cmd, &flags);
-
-        cmd->transport_state |= CMD_T_ABORTED;
-        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+        cancel_work_sync(&cmd->work);
+        transport_wait_for_tasks(cmd);
-        core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+        core_tmr_handle_tas_abort(cmd, tas);
+        target_put_sess_cmd(cmd);
     }
 }
@@ -334,6 +399,7 @@ int core_tmr_lun_reset(
 {
     struct se_node_acl *tmr_nacl = NULL;
     struct se_portal_group *tmr_tpg = NULL;
+    struct se_session *tmr_sess = NULL;
     int tas;
     /*
      * TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@ int core_tmr_lun_reset(
      * or struct se_device passthrough..
      */
    if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
-        tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
-        tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+        tmr_sess = tmr->task_cmd->se_sess;
+        tmr_nacl = tmr_sess->se_node_acl;
+        tmr_tpg = tmr_sess->se_tpg;
        if (tmr_nacl && tmr_tpg) {
            pr_debug("LUN_RESET: TMR caller fabric: %s"
                " initiator port %s\n",
@@ -366,7 +433,7 @@ int core_tmr_lun_reset(
            dev->transport->name, tas);
     core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
-    core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+    core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
                 preempt_and_abort_list);
     /*
......
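__target_check_io_state() above is the heart of the LUN_RESET/ABORT fixes: with the session's command lock held, it refuses commands that are already complete or fabric-stopped, marks the rest CMD_T_ABORTED, and pins them with kref_get_unless_zero() so they cannot be freed while the TMR drains them. A minimal sketch of that check-and-pin pattern (assumed names, not core_tmr code):

/*
 * Caller holds the lock serializing state changes for the command.
 * The get-unless-zero refcount refuses commands already being freed.
 */
#include <stdatomic.h>
#include <stdbool.h>

enum { T_COMPLETE = 1 << 0, T_FABRIC_STOP = 1 << 1, T_ABORTED = 1 << 2 };

struct cmd {
    unsigned transport_state;
    atomic_int kref;    /* 0 means the command is already dying */
};

static bool kref_get_unless_zero(atomic_int *kref)
{
    int v = atomic_load(kref);
    while (v != 0)
        if (atomic_compare_exchange_weak(kref, &v, v + 1))
            return true;
    return false;
}

static bool check_io_state_and_pin(struct cmd *c)
{
    if (c->transport_state & (T_COMPLETE | T_FABRIC_STOP))
        return false;       /* already past the point of abort */
    c->transport_state |= T_ABORTED;
    return kref_get_unless_zero(&c->kref);
}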
(One file's diff is collapsed and not shown here.)
@@ -903,7 +903,7 @@ static int tcmu_configure_device(struct se_device *dev)
     info->version = __stringify(TCMU_MAILBOX_VERSION);
     info->mem[0].name = "tcm-user command & data buffer";
-    info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+    info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
     info->mem[0].size = TCMU_RING_SIZE;
     info->mem[0].memtype = UIO_MEM_VIRTUAL;
......
@@ -94,5 +94,8 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
     sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+                       struct request_queue *q, int block_size);
 #endif /* TARGET_CORE_BACKEND_H */
@@ -140,6 +140,8 @@ enum se_cmd_flags_table {
     SCF_COMPARE_AND_WRITE = 0x00080000,
     SCF_COMPARE_AND_WRITE_POST = 0x00100000,
     SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+    SCF_ACK_KREF = 0x00400000,
+    SCF_USE_CPUID = 0x00800000,
 };
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -187,6 +189,7 @@ enum target_sc_flags_table {
     TARGET_SCF_BIDI_OP = 0x01,
     TARGET_SCF_ACK_KREF = 0x02,
     TARGET_SCF_UNKNOWN_SIZE = 0x04,
+    TARGET_SCF_USE_CPUID = 0x08,
 };
 /* fabric independent task management function values */
@@ -490,8 +493,9 @@ struct se_cmd {
 #define CMD_T_SENT          (1 << 4)
 #define CMD_T_STOP          (1 << 5)
 #define CMD_T_DEV_ACTIVE    (1 << 7)
-#define CMD_T_REQUEST_STOP  (1 << 8)
 #define CMD_T_BUSY          (1 << 9)
+#define CMD_T_TAS           (1 << 10)
+#define CMD_T_FABRIC_STOP   (1 << 11)
     spinlock_t t_state_lock;
     struct kref cmd_kref;
     struct completion t_transport_stop_comp;
@@ -511,9 +515,6 @@ struct se_cmd {
     struct list_head state_list;
-    /* old task stop completion, consider merging with some of the above */
-    struct completion task_stop_comp;
     /* backend private data */
     void *priv;
......