Commit 0f65ff68 authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.3.10: Update SLI interface areas

- Clear LPFC_DRIVER_ABORTED on FCP command completion.
- Clear exchange busy flag when I/O is aborted and found on aborted list.
- Free sglq when XRI_ABORTED event is processed before release of IOCB.
- Only process iocb as aborted when LPFC_DRIVER_ABORTED is set.
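
The common thread in these changes is a small per-sglq state machine (SGL_FREED, SGL_ALLOCATED, SGL_XRI_ABORTED; see the enum added to lpfc_sli4.h below) that lets the release path tell whether the XRI_ABORTED event has already been handled. What follows is a minimal, self-contained sketch of that lifecycle, not driver code: mock_sglq and the helper functions are hypothetical stand-ins used only to illustrate why an exchange-busy sglq whose abort event was already processed should go straight back to the free list instead of parking on the aborted list.

/*
 * Minimal userspace model of the sglq lifecycle added by this patch.
 * mock_sglq and the helpers are illustrative stand-ins, not lpfc code.
 */
#include <stdio.h>

enum lpfc_sgl_state {
	SGL_FREED,		/* on the free list */
	SGL_ALLOCATED,		/* handed out for an active exchange */
	SGL_XRI_ABORTED		/* XRI_ABORTED event seen before IOCB release */
};

struct mock_sglq {
	enum lpfc_sgl_state state;
	int on_abts_list;	/* stand-in for aborted-list membership */
};

/* Allocation path: getting a sglq now marks the entry SGL_ALLOCATED. */
static void get_sglq(struct mock_sglq *q)
{
	q->state = SGL_ALLOCATED;
}

/* XRI_ABORTED event path: if the entry is still active, remember the abort. */
static void xri_aborted_event(struct mock_sglq *q)
{
	if (q->state == SGL_ALLOCATED)
		q->state = SGL_XRI_ABORTED;
}

/*
 * Release path: an exchange-busy sglq goes to the aborted list only if the
 * abort event has NOT already been processed; otherwise it is freed at once.
 */
static void release_iocbq(struct mock_sglq *q, int exchange_busy)
{
	if (exchange_busy && q->state != SGL_XRI_ABORTED) {
		q->on_abts_list = 1;		/* wait for XRI_ABORTED */
	} else {
		q->state = SGL_FREED;		/* back on the free list */
		q->on_abts_list = 0;
	}
}

int main(void)
{
	struct mock_sglq q = { .state = SGL_FREED, .on_abts_list = 0 };

	get_sglq(&q);			/* I/O issued */
	xri_aborted_event(&q);		/* abort completion arrives first */
	release_iocbq(&q, 1);		/* IOCB released with XB set */

	printf("state=%d on_abts_list=%d\n", q.state, q.on_abts_list);
	return 0;
}

Run standalone, the sketch ends with state=0 (SGL_FREED) and the entry off the aborted list, which is the behavior the release-path change in the sglq handling below enforces.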
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent e40a02c1
@@ -509,7 +509,6 @@ struct lpfc_hba {
 	int (*lpfc_hba_down_link)
 		(struct lpfc_hba *);
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
......
@@ -385,7 +385,7 @@ void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN 5
 #define HBA_EVENT_LINK_UP 2
......
@@ -6234,7 +6234,8 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 		spin_unlock_irq(shost->host_lock);
 	}
@@ -6812,21 +6813,27 @@ lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;

-	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
-			spin_unlock_irqrestore(
-					&phba->sli4_hba.abts_sgl_list_lock,
-					 iflag);
-			spin_lock_irqsave(&phba->hbalock, iflag);
-
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+	sglq_entry = __lpfc_get_active_sglq(phba, xri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
 }
@@ -822,6 +822,8 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	LIST_HEAD(aborts);
 	int ret;
 	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
 	ret = lpfc_hba_down_post_s3(phba);
 	if (ret)
 		return ret;
@@ -837,6 +839,10 @@ lpfc_hba_down_post_s4(struct lpfc_hba *phba)
 	 * list.
 	 */
 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			&phba->sli4_hba.lpfc_sgl_list);
 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -4412,6 +4418,7 @@ lpfc_init_sgl_list(struct lpfc_hba *phba)
 		/* The list order is used by later block SGL registraton */
 		spin_lock_irq(&phba->hbalock);
+		sglq_entry->state = SGL_FREED;
 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
 		phba->sli4_hba.total_sglq_bufs++;
......
@@ -620,23 +620,40 @@ lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	struct lpfc_scsi_buf *psb, *next_psb;
 	unsigned long iflag = 0;
+	struct lpfc_iocbq *iocbq;
+	int i;

-	spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
 		if (psb->cur_iocbq.sli4_xritag == xri) {
 			list_del(&psb->list);
 			psb->exch_busy = 0;
 			psb->status = IOSTAT_SUCCESS;
-			spin_unlock_irqrestore(
-				&phba->sli4_hba.abts_scsi_buf_list_lock,
-				iflag);
+			spin_unlock(
+				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			lpfc_release_scsi_buf_s4(phba, psb);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
-				iflag);
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
+			continue;
+		if (iocbq->sli4_xritag != xri)
+			continue;
+		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		psb->exch_busy = 0;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
 }

 /**
@@ -1006,6 +1023,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
 	dma_addr_t physaddr;
@@ -1056,6 +1074,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 			physaddr = sg_dma_address(sgel);
 			if (phba->sli_rev == 3 &&
 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1101,8 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 	 * explicitly reinitialized since all iocb memory resources are reused.
 	 */
 	if (phba->sli_rev == 3 &&
-	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
 			/*
 			 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1127,7 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 		} else {
 			iocb_cmd->un.fcpi64.bdl.bdeSize =
 				((num_bde + 2) * sizeof(struct ulp_bde64));
+			iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
 		}
 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
......
@@ -494,7 +494,7 @@ __lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
  *
  * Returns sglq ponter = success, NULL = Failure.
  **/
-static struct lpfc_sglq *
+struct lpfc_sglq *
 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
 	uint16_t adj_xri;
@@ -526,6 +526,7 @@ __lpfc_sli_get_sglq(struct lpfc_hba *phba)
 		return NULL;
 	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+	sglq->state = SGL_ALLOCATED;
 	return sglq;
 }
@@ -580,15 +581,18 @@ __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
 	if (sglq) {
-		if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
+		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+			(sglq->state != SGL_XRI_ABORTED)) {
 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
 					iflag);
 			list_add(&sglq->list,
 				&phba->sli4_hba.lpfc_abts_els_sgl_list);
 			spin_unlock_irqrestore(
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
-		} else
+		} else {
+			sglq->state = SGL_FREED;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+		}
 	}
@@ -2258,41 +2262,56 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 					spin_unlock_irqrestore(&phba->hbalock,
 							       iflag);
 				}
-				if ((phba->sli_rev == LPFC_SLI_REV4) &&
-				    (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
-					/* Set cmdiocb flag for the exchange
-					 * busy so sgl (xri) will not be
-					 * released until the abort xri is
-					 * received from hba, clear the
-					 * LPFC_DRIVER_ABORTED bit in case
-					 * it was driver initiated abort.
-					 */
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-					cmdiocbp->iocb_flag &=
-						~LPFC_DRIVER_ABORTED;
-					cmdiocbp->iocb_flag |=
-						LPFC_EXCHANGE_BUSY;
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
-					cmdiocbp->iocb.ulpStatus =
-						IOSTAT_LOCAL_REJECT;
-					cmdiocbp->iocb.un.ulpWord[4] =
-						IOERR_ABORT_REQUESTED;
-					/*
-					 * For SLI4, irsiocb contains NO_XRI
-					 * in sli_xritag, it shall not affect
-					 * releasing sgl (xri) process.
-					 */
-					saveq->iocb.ulpStatus =
-						IOSTAT_LOCAL_REJECT;
-					saveq->iocb.un.ulpWord[4] =
-						IOERR_SLI_ABORTED;
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					if (saveq->iocb_flag &
+					    LPFC_EXCHANGE_BUSY) {
+						/* Set cmdiocb flag for the
+						 * exchange busy so sgl (xri)
+						 * will not be released until
+						 * the abort xri is received
+						 * from hba.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag |=
+							LPFC_EXCHANGE_BUSY;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+					if (cmdiocbp->iocb_flag &
+					    LPFC_DRIVER_ABORTED) {
+						/*
+						 * Clear LPFC_DRIVER_ABORTED
+						 * bit in case it was driver
+						 * initiated abort.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag &=
+							~LPFC_DRIVER_ABORTED;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						cmdiocbp->iocb.un.ulpWord[4] =
+							IOERR_ABORT_REQUESTED;
+						/*
+						 * For SLI4, irsiocb contains
+						 * NO_XRI in sli_xritag, it
+						 * shall not affect releasing
+						 * sgl (xri) process.
+						 */
+						saveq->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						saveq->iocb.un.ulpWord[4] =
+							IOERR_SLI_ABORTED;
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						saveq->iocb_flag |=
+							LPFC_DELAY_MEM_FREE;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
 				}
 			}
 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2534,16 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
 							 &rspiocbq);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-				spin_unlock_irqrestore(&phba->hbalock,
-						       iflag);
-				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-						      &rspiocbq);
-				spin_lock_irqsave(&phba->hbalock,
-						  iflag);
-			}
+			if (unlikely(!cmdiocbq))
+				break;
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			if (cmdiocbq->iocb_cmpl) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
 			break;
 		case LPFC_UNSOL_IOCB:
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -7451,6 +7472,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 {
 	wait_queue_head_t *pdone_q;
 	unsigned long iflags;
+	struct lpfc_scsi_buf *lpfc_cmd;

 	spin_lock_irqsave(&phba->hbalock, iflags);
 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7458,6 +7480,14 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
 			&rspiocbq->iocb, sizeof(IOCB_t));

+	/* Set the exchange busy flag for task management commands */
+	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+			cur_iocbq);
+		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+	}
+
 	pdone_q = cmdiocbq->context_un.wait_queue;
 	if (pdone_q)
 		wake_up(pdone_q);
@@ -9076,6 +9106,12 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
 	/* Fake the irspiocb and copy necessary response information */
 	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);

+	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
 	/* Pass the cmd_iocb and the rsp state to the upper layer */
 	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
 }
......
@@ -62,6 +62,7 @@ struct lpfc_iocbq {
 #define LPFC_DELAY_MEM_FREE	0x20	/* Defer free'ing of FC data */
 #define LPFC_EXCHANGE_BUSY	0x40	/* SLI4 hba reported XB in response */
 #define LPFC_USE_FCPWQIDX	0x80	/* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP		0x100	/* security IO */
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
......
@@ -431,11 +431,18 @@ enum lpfc_sge_type {
 	SCSI_BUFF_TYPE
 };

+enum lpfc_sgl_state {
+	SGL_FREED,
+	SGL_ALLOCATED,
+	SGL_XRI_ABORTED
+};
+
 struct lpfc_sglq {
 	/* lpfc_sglqs are used in double linked lists */
 	struct list_head list;
 	struct list_head clist;
 	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	enum lpfc_sgl_state state;
 	uint16_t iotag;		/* pre-assigned IO tag */
 	uint16_t sli4_xritag;	/* pre-assigned XRI, (OXID) tag. */
 	struct sli4_sge *sgl;	/* pre-assigned SGL */
......