Commit 0c9ab6f5 authored by James Smart, committed by James Bottomley

[SCSI] lpfc 8.3.10: Added round robin FCF failover

- Added round robin FCF failover on initial or FCF rediscovery FLOGI failure.
Signed-off-by: James Smart <james.smart@emulex.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
parent fc2b989b
@@ -63,6 +63,7 @@ void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -222,6 +223,9 @@ void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
 void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
...
@@ -771,6 +771,7 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
 	struct serv_parm *sp;
+	uint16_t fcf_index;
 	int rc;
 
 	/* Check to see if link went down during discovery */
@@ -788,6 +789,54 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		 vport->port_state);
 
 	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform round robin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on registered "
+					"FCF record fcf_index:%d, trying "
+					"to perform round robin failover\n",
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+				/*
+				 * Exhausted the eligible FCF record list,
+				 * fail through to retry FLOGI on current
+				 * FCF record.
+				 */
+				lpfc_printf_log(phba, KERN_WARNING,
+						LOG_FIP | LOG_ELS,
+						"2760 FLOGI exhausted FCF "
+						"round robin failover list, "
+						"retry FLOGI on the current "
+						"registered FCF index:%d\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+				spin_unlock_irq(&phba->hbalock);
+			} else {
+				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
+								   fcf_index);
+				if (rc) {
+					lpfc_printf_log(phba, KERN_WARNING,
+							LOG_FIP | LOG_ELS,
+							"2761 FLOGI round "
+							"robin FCF failover "
+							"read FCF failed "
+							"rc:x%x, fcf_index:%d\n",
+							rc,
+							phba->fcf.current_rec.fcf_indx);
+					spin_lock_irq(&phba->hbalock);
+					phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+					spin_unlock_irq(&phba->hbalock);
+				} else
+					goto out;
+			}
+		}
+
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
@@ -841,8 +890,18 @@ lpfc_cmpl_els_flogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 		else
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 
-		if (!rc)
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
+					 "2769 FLOGI successful on FCF "
+					 "record: current_fcf_index:x%x, "
+					 "terminate FCF round robin "
+					 "failover process\n",
+					 phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
+		}
 	}
 
 flogifail:
@@ -6075,21 +6134,18 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 }
 
 /**
- * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vports' retry delay timers
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine abort all pending discovery commands and
- * start a timer to retry FLOGI for the physical port
- * discovery.
+ * This routine cancels the retry delay timers for all the vports.
 **/
 void
-lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host *shost;
-	int i;
 	uint32_t link_state;
+	int i;
 
 	/* Treat this failure as linkdown for all vports */
 	link_state = phba->link_state;
@@ -6107,13 +6163,30 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba)
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
 	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+
+	/* Cancel the retry delay timers for all vports */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
 
 	/* If fabric require FLOGI, then re-instantiate physical login */
 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
 	if (!ndlp)
 		return;
 
 	shost = lpfc_shost_from_vport(phba->pport);
 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 	spin_lock_irq(shost->host_lock);
...
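The split above leaves lpfc_cancel_all_vport_retry_delay_timer() to walk every vport and clear its delayed-retry state, while lpfc_retry_pport_discovery() simply calls it before re-arming the physical-port FLOGI timer. The per-vport walk uses the driver's NULL-terminated vport work array. A minimal standalone sketch of that iteration pattern, using demo types rather than the driver's API:

#include <stdio.h>

struct demo_vport { int id; int retry_timer_armed; };

/* Walk a NULL-terminated array of ports, cancelling each retry timer,
 * loosely mirroring the lpfc_create_vport_work_array() /
 * lpfc_destroy_vport_work_array() usage in the hunk above. */
static void cancel_all_retry_timers(struct demo_vport **vports)
{
	int i;

	if (vports == NULL)
		return;
	for (i = 0; vports[i] != NULL; i++) {
		vports[i]->retry_timer_armed = 0; /* stand-in for del_timer() */
		printf("cancelled retry timer on vport %d\n", vports[i]->id);
	}
}

int main(void)
{
	struct demo_vport a = { 0, 1 }, b = { 1, 1 };
	struct demo_vport *vports[] = { &a, &b, NULL };

	cancel_all_retry_timers(vports);
	return 0;
}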
[collapsed diff omitted]
@@ -2201,8 +2201,8 @@ __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
 	/* Clear pending FCF rediscovery wait and failover in progress flags */
 	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
-				FCF_DEAD_FOVER |
-				FCF_CVL_FOVER);
+				FCF_DEAD_DISC |
+				FCF_ACVL_DISC);
 	/* Now, try to stop the timer */
 	del_timer(&phba->fcf.redisc_wait);
 }
@@ -2943,6 +2943,9 @@ lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
 	/* FCF rediscovery event to worker thread */
 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
 	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover wait timer expired, post "
+			"a worker thread event for FCF table scan\n");
 	/* wake up worker thread */
 	lpfc_worker_wake_up(phba);
 }
@@ -3300,10 +3303,11 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->index,
-			acqe_fcoe->event_tag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2546 New FCF found/FCF parameter modified event: "
+			"evt_tag:x%x, fcf_index:x%x\n",
+			acqe_fcoe->event_tag, acqe_fcoe->index);
 		spin_lock_irq(&phba->hbalock);
 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3314,6 +3318,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
+
 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
 			/*
 			 * If fast FCF failover rescan event is pending,
@@ -3324,12 +3329,33 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		}
 		spin_unlock_irq(&phba->hbalock);
 
-		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF round robin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read new FCF record with "
+					"fcf_index:x%x for updating FCF "
+					"round robin failover bmask\n",
+					acqe_fcoe->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+		}
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan due to new FCF "
+				"event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2547 Read FCF record failed 0x%x\n",
-				rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2547 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
 		break;
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3340,7 +3366,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		break;
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF disconnected from network index 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
@@ -3349,21 +3375,32 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			break;
 		/* We request port to rediscover the entire FCF table for
 		 * a fast recovery from case that the current FCF record
-		 * is no longer valid if the last CVL event hasn't already
-		 * triggered process.
+		 * is no longer valid if we are not in the middle of FCF
+		 * failover process already.
 		 */
 		spin_lock_irq(&phba->hbalock);
-		if (phba->fcf.fcf_flag & FCF_CVL_FOVER) {
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
 			break;
 		}
 		/* Mark the fast failover process in progress */
-		phba->fcf.fcf_flag |= FCF_DEAD_FOVER;
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
 		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
 		rc = lpfc_sli4_redisc_fcf_table(phba);
 		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mailbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
 			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER;
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
 			spin_unlock_irq(&phba->hbalock);
 			/*
 			 * Last resort will fail over by treating this
@@ -3378,7 +3415,7 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 		lpfc_sli4_perform_all_vport_cvl(phba);
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 		vport = lpfc_find_vport_by_vpid(phba,
@@ -3419,21 +3456,31 @@ lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
 			 * Otherwise, we request port to rediscover
 			 * the entire FCF table for a fast recovery
 			 * from possible case that the current FCF
-			 * is no longer valid if the FCF_DEAD event
-			 * hasn't already triggered process.
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
 			 */
 			spin_lock_irq(&phba->hbalock);
-			if (phba->fcf.fcf_flag & FCF_DEAD_FOVER) {
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 				spin_unlock_irq(&phba->hbalock);
 				break;
 			}
 			/* Mark the fast failover process in progress */
-			phba->fcf.fcf_flag |= FCF_CVL_FOVER;
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
 			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF fast failover due "
+					"to CVL event: evt_tag:x%x\n",
+					acqe_fcoe->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
 			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mailbox command failed, fail "
+						"through to CVL event\n");
 				spin_lock_irq(&phba->hbalock);
-				phba->fcf.fcf_flag &= ~FCF_CVL_FOVER;
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
 				spin_unlock_irq(&phba->hbalock);
 				/*
 				 * Last resort will be re-try on the
@@ -3537,11 +3584,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *phba)
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Scan FCF table from the first entry to re-discover SAN */
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start FCF table scan after FCF "
+			"rediscovery quiescent period over\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	if (rc)
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2747 Post FCF rediscovery read FCF record "
-			"failed 0x%x\n", rc);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2747 Issue FCF scan read FCF mailbox "
+			"command failed 0x%x\n", rc);
 }
 
 /**
@@ -3833,6 +3883,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -4009,13 +4060,24 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		goto out_free_active_sgl;
 	}
 
+	/* Allocate eligible FCF bmask memory for FCF round robin failover */
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		goto out_remove_rpi_hdrs;
+	}
+
 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
-		goto out_remove_rpi_hdrs;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -4068,6 +4130,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 
 out_free_fcp_eq_hdl:
 	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
 	lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -4113,6 +4177,9 @@ lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
 	lpfc_sli4_remove_rpi_hdrs(phba);
 	lpfc_sli4_remove_rpis(phba);
 
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
 	lpfc_free_sgl_list(phba);
...
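The bmask sizing in the allocation hunk above is the open-coded form of the kernel's BITS_TO_LONGS(): round the number of tracked FCF indexes up to a whole number of unsigned longs before kzalloc(). A standalone check of that arithmetic, using plain C and CHAR_BIT in place of the kernel's BITS_PER_LONG:

#include <limits.h>
#include <stdio.h>

#define FCF_TBL_INDX_MAX 32   /* stands in for LPFC_SLI4_FCF_TBL_INDX_MAX */
#define BITS_PER_ULONG   (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
	/* Same rounding-up the driver does before allocating the bmask */
	size_t longs = (FCF_TBL_INDX_MAX + BITS_PER_ULONG - 1) / BITS_PER_ULONG;

	printf("%d bits -> %zu unsigned long(s), %zu bytes allocated\n",
	       FCF_TBL_INDX_MAX, longs, longs * sizeof(unsigned long));
	return 0;
}

On a 64-bit build this yields one unsigned long (8 bytes), which comfortably covers the 32 FCF indexes the driver tracks.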
@@ -35,6 +35,7 @@
 #define LOG_VPORT	0x00004000 /* NPIV events */
 #define LOF_SECURITY	0x00008000 /* Security events */
 #define LOG_EVENT	0x00010000 /* CT,TEMP,DUMP, logging */
+#define LOG_FIP		0x00020000 /* FIP events */
 #define LOG_ALL_MSG	0xffffffff /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
...
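LOG_FIP takes the next free bit (0x00020000) in the verbose-logging mask, so the new FIP messages can be enabled or filtered independently through the driver's log-verbose setting. A tiny sketch of how this kind of mask gating works (demo values and names, not the actual lpfc_printf_log macro):

#include <stdio.h>

#define DEMO_LOG_DISCOVERY 0x00000002UL  /* illustrative values only */
#define DEMO_LOG_FIP       0x00020000UL

static unsigned long cfg_log_verbose = DEMO_LOG_FIP; /* what the admin enabled */

static void demo_log(unsigned long mask, const char *msg)
{
	if (mask & cfg_log_verbose)  /* emit only if an enabled bit matches */
		printf("lpfc: %s\n", msg);
}

int main(void)
{
	demo_log(DEMO_LOG_FIP | DEMO_LOG_DISCOVERY, "2770 Start FCF table scan"); /* printed */
	demo_log(DEMO_LOG_DISCOVERY, "discovery-only message");                   /* filtered */
	return 0;
}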
@@ -1748,7 +1748,7 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
 }
 
 /**
- * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: index to fcf table.
  *
@@ -1759,9 +1759,9 @@ lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
  * NULL.
 **/
 int
-lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
-			      struct lpfcMboxq *mboxq,
-			      uint16_t fcf_index)
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+			   struct lpfcMboxq *mboxq,
+			   uint16_t fcf_index)
 {
 	void *virt_addr;
 	dma_addr_t phys_addr;
...
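The renamed helper above only builds the READ_FCF mailbox command; the callers in lpfc_sli.c below then attach a purpose-specific completion handler and issue it asynchronously with MBX_NOWAIT, freeing the mailbox on any failure. A condensed userspace analogue of that build/submit/callback shape, using hypothetical demo types rather than the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct demo_mbox {
	int fcf_index;
	void (*cmpl)(struct demo_mbox *); /* per-purpose completion callback */
};

static void scan_cmpl(struct demo_mbox *m)
{
	printf("completion: processed FCF record %d\n", m->fcf_index);
	free(m); /* completion path owns and releases the request */
}

static int submit_read_fcf(int fcf_index, void (*cmpl)(struct demo_mbox *))
{
	struct demo_mbox *m = malloc(sizeof(*m));

	if (!m)
		return -1; /* the driver would return -ENOMEM here */
	m->fcf_index = fcf_index;
	m->cmpl = cmpl;
	/* a real driver queues this to hardware; the demo "completes" inline */
	m->cmpl(m);
	return 0;
}

int main(void)
{
	return submit_read_fcf(0, scan_cmpl) ? 1 : 0;
}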
@@ -11996,15 +11996,19 @@ lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: FCF table entry offset.
  *
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading FCF
+ * record and processing it one at a time starting from the @fcf_index
+ * for initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Returns 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
 **/
 int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 {
 	int rc = 0, error;
 	LPFC_MBOXQ_t *mboxq;
@@ -12016,17 +12020,17 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 			"2000 Failed to allocate mbox for "
 			"READ_FCF cmd\n");
 		error = -ENOMEM;
-		goto fail_fcfscan;
+		goto fail_fcf_scan;
 	}
 	/* Construct the read FCF record mailbox command */
-	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
 	if (rc) {
 		error = -EINVAL;
-		goto fail_fcfscan;
+		goto fail_fcf_scan;
 	}
 	/* Issue the mailbox command asynchronously */
 	mboxq->vport = phba->pport;
-	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
 		error = -EIO;
@@ -12034,9 +12038,13 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag |= FCF_DISC_INPROGRESS;
 		spin_unlock_irq(&phba->hbalock);
+		/* Reset FCF round robin index bmask for new scan */
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+			memset(phba->fcf.fcf_rr_bmask, 0,
+			       sizeof(*phba->fcf.fcf_rr_bmask));
 		error = 0;
 	}
-fail_fcfscan:
+fail_fcf_scan:
 	if (error) {
 		if (mboxq)
 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -12048,6 +12056,181 @@ lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
 	return error;
 }
+/**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Returns 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2763 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read an FCF record indicated by @fcf_index to
+ * determine whether it's eligible for the FLOGI round robin failover list.
+ *
+ * Returns 0 if the mailbox command is submitted successfully, non-zero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2758 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine returns the next eligible FCF record index in a round
+ * robin fashion. If the next eligible FCF record index equals the
+ * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * is returned to indicate the failover list is exhausted; otherwise,
+ * the next eligible FCF record's index is returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_index;
+
+	/* Search from the currently registered FCF index */
+	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+				       LPFC_SLI4_FCF_TBL_INDX_MAX,
+				       phba->fcf.current_rec.fcf_indx);
+	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+	/* Round robin failover stop condition */
+	if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+		return LPFC_FCOE_FCF_NEXT_NONE;
+
+	return next_fcf_index;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set, otherwise -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2610 HBA FCF index reached driver's "
+				"bookkeeping dimension: fcf_index:%d, "
+				"driver_bmask_max:%d\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return -EINVAL;
+	}
+	/* Set the eligible FCF record index bmask */
+	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: index into the FCF table.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2762 HBA FCF index goes beyond driver's "
+				"bookkeeping dimension: fcf_index:%d, "
+				"driver_bmask_max:%d\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return;
+	}
+	/* Clear the eligible FCF record index bmask */
+	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
 /**
  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
  * @phba: pointer to lpfc hba data structure.
@@ -12069,13 +12252,13 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
 			&redisc_fcf->header.cfg_shdr.response);
 	if (shdr_status || shdr_add_status) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
			"2746 Requesting for FCF rediscovery failed "
			"status x%x add_status x%x\n",
			shdr_status, shdr_add_status);
-		if (phba->fcf.fcf_flag & FCF_CVL_FOVER) {
+		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
 			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_CVL_FOVER;
+			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
 			spin_unlock_irq(&phba->hbalock);
 			/*
 			 * CVL event triggered FCF rediscover request failed,
@@ -12084,7 +12267,7 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 			lpfc_retry_pport_discovery(phba);
 		} else {
 			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &= ~FCF_DEAD_FOVER;
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
 			spin_unlock_irq(&phba->hbalock);
 			/*
 			 * DEAD FCF event triggered FCF rediscover request
@@ -12093,12 +12276,16 @@ lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 			 */
 			lpfc_sli4_fcf_dead_failthrough(phba);
 		}
-	} else
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2775 Start FCF rediscovery quiescent period "
+				"wait timer before scanning FCF table\n");
 		/*
 		 * Start FCF rediscovery wait timer for pending FCF
 		 * before rescan FCF record table.
 		 */
 		lpfc_fcf_redisc_wait_start_timer(phba);
+	}
 	mempool_free(mbox, phba->mbox_mem_pool);
 }
@@ -12117,6 +12304,9 @@ lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
 	int rc, length;
 
+	/* Cancel retry delay timers for all vports before FCF rediscover */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
...
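lpfc_sli4_fcf_rr_next_index_get() above implements the round robin walk itself: search the eligibility bmask upward from the currently registered index, wrap to bit 0 when the search runs off the end, and report exhaustion once the walk comes back to the index where this failover round started (fcf_rr_init_indx). The same logic in standalone C with a toy find_next_bit; for clarity the demo passes the search offset explicitly, starting one past the current index:

#include <stdio.h>
#include <stdint.h>

#define TBL_MAX   32      /* stands in for LPFC_SLI4_FCF_TBL_INDX_MAX */
#define NEXT_NONE 0xFFFF  /* stands in for LPFC_FCOE_FCF_NEXT_NONE */

/* Toy find_next_bit(): first set bit at position >= offset, else size */
static unsigned int toy_find_next_bit(uint32_t mask, unsigned int size,
				      unsigned int offset)
{
	unsigned int i;

	for (i = offset; i < size; i++)
		if (mask & (1u << i))
			return i;
	return size;
}

/* Round robin walk with wraparound and a stop at the round's start index */
static unsigned int rr_next_index(uint32_t bmask, unsigned int offset,
				  unsigned int init_indx)
{
	unsigned int next = toy_find_next_bit(bmask, TBL_MAX, offset);

	if (next >= TBL_MAX)                     /* ran off the end: wrap */
		next = toy_find_next_bit(bmask, TBL_MAX, 0);
	if (next == init_indx)                   /* walked the full circle */
		return NEXT_NONE;
	return next;
}

int main(void)
{
	uint32_t bmask = (1u << 3) | (1u << 9) | (1u << 20);

	/* Round started at index 3, currently on 9: expect 20, then none */
	printf("next after 9:  %u\n", rr_next_index(bmask, 10, 3)); /* 20 */
	printf("next after 20: %u\n", rr_next_index(bmask, 21, 3)); /* 65535 */
	return 0;
}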
@@ -153,17 +153,27 @@ struct lpfc_fcf {
 #define FCF_REGISTERED	0x02 /* FCF registered with FW */
 #define FCF_SCAN_DONE	0x04 /* FCF table scan done */
 #define FCF_IN_USE	0x08 /* At least one discovery completed */
-#define FCF_DEAD_FOVER	0x10 /* FCF DEAD triggered fast FCF failover */
-#define FCF_CVL_FOVER	0x20 /* CVL triggered fast FCF failover */
-#define FCF_REDISC_PEND	0x40 /* FCF rediscovery pending */
-#define FCF_REDISC_EVT	0x80 /* FCF rediscovery event to worker thread */
-#define FCF_REDISC_FOV	0x100 /* Post FCF rediscovery fast failover */
+#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
 	uint32_t addr_mode;
+	uint16_t fcf_rr_init_indx;
 	struct lpfc_fcf_rec current_rec;
 	struct lpfc_fcf_rec failover_rec;
 	struct timer_list redisc_wait;
+	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
 };
 
+/*
+ * Maximum FCF table index; it is for driver internal bookkeeping and
+ * just needs to be no less than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX	32
+
 #define LPFC_REGION23_SIGNATURE "RG23"
 #define LPFC_REGION23_VERSION	1
 #define LPFC_REGION23_LAST_REC	0xff
@@ -472,8 +482,8 @@ void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
 void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
			   struct lpfc_mbx_sge *);
-int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
-				  uint16_t);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+			       uint16_t);
 void lpfc_sli4_hba_reset(struct lpfc_hba *);
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -532,8 +542,13 @@ int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
 uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
 uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
-int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
-void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
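The reworked flag block packs the three discovery causes (initial discovery, FCF-dead failover, all-CVL failover) into distinct bits and defines FCF_DISCOVERY as their union, so hot paths can ask "is any failover discovery running?" with a single test while error paths back out only the bit for their own cause. A quick standalone illustration of that composite-flag idiom:

#include <stdio.h>
#include <stdint.h>

#define FCF_INIT_DISC 0x10
#define FCF_DEAD_DISC 0x20
#define FCF_ACVL_DISC 0x40
#define FCF_DISCOVERY (FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)

int main(void)
{
	uint32_t fcf_flag = 0;

	fcf_flag |= FCF_DEAD_DISC;          /* an FCF DEAD event started failover */

	if (fcf_flag & FCF_DISCOVERY)       /* any discovery cause at all? */
		printf("failover discovery in progress\n");

	fcf_flag &= ~FCF_DEAD_DISC;         /* its mailbox failed: back out only this cause */
	printf("still discovering? %s\n",
	       (fcf_flag & FCF_DISCOVERY) ? "yes" : "no");
	return 0;
}

In the driver these tests and updates are done under phba->hbalock, as the hunks above show; the demo omits the locking.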