Commit 7ab07683 authored by Justin Tee, committed by Martin K. Petersen

scsi: lpfc: Resolve miscellaneous variable set but not used compiler warnings

The local variables called curr_data are incremented, but not actually used
for anything so they are removed.

The return value of lpfc_sli4_poll_eq is not used anywhere, and the function
is not called outside of lpfc_sli.c.  Thus, its declaration is removed from
lpfc_crtn.h.  Also, lpfc_sli4_poll_eq's path argument is not used in the
routine, so it is removed along with the corresponding macros.
Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 1f7b5f94
...@@ -1592,8 +1592,6 @@ struct lpfc_hba { ...@@ -1592,8 +1592,6 @@ struct lpfc_hba {
struct timer_list cpuhp_poll_timer; struct timer_list cpuhp_poll_timer;
struct list_head poll_list; /* slowpath eq polling list */ struct list_head poll_list; /* slowpath eq polling list */
#define LPFC_POLL_HB 1 /* slowpath heartbeat */ #define LPFC_POLL_HB 1 /* slowpath heartbeat */
#define LPFC_POLL_FASTPATH 0 /* called from fastpath */
#define LPFC_POLL_SLOWPATH 1 /* called from slowpath */
char os_host_name[MAXHOSTNAMELEN]; char os_host_name[MAXHOSTNAMELEN];
......
...@@ -253,7 +253,6 @@ int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap, ...@@ -253,7 +253,6 @@ int lpfc_read_object(struct lpfc_hba *phba, char *s, uint32_t *datap,
uint32_t len); uint32_t len);
void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba); void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba);
int lpfc_sli4_poll_eq(struct lpfc_queue *q, uint8_t path);
void lpfc_sli4_poll_hbtimer(struct timer_list *t); void lpfc_sli4_poll_hbtimer(struct timer_list *t);
void lpfc_sli4_start_polling(struct lpfc_queue *q); void lpfc_sli4_start_polling(struct lpfc_queue *q);
void lpfc_sli4_stop_polling(struct lpfc_queue *q); void lpfc_sli4_stop_polling(struct lpfc_queue *q);
......
...@@ -1689,7 +1689,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, ...@@ -1689,7 +1689,7 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct lpfc_pde6 *pde6 = NULL; struct lpfc_pde6 *pde6 = NULL;
struct lpfc_pde7 *pde7 = NULL; struct lpfc_pde7 *pde7 = NULL;
dma_addr_t dataphysaddr, protphysaddr; dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0; unsigned short curr_prot = 0;
unsigned int split_offset; unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes; unsigned int protgrp_blks, protgrp_bytes;
...@@ -1858,7 +1858,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, ...@@ -1858,7 +1858,6 @@ lpfc_bg_setup_bpl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
bpl->tus.w = le32_to_cpu(bpl->tus.w); bpl->tus.w = le32_to_cpu(bpl->tus.w);
num_bde++; num_bde++;
curr_data++;
if (split_offset) if (split_offset)
break; break;
...@@ -2119,7 +2118,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, ...@@ -2119,7 +2118,7 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
struct scatterlist *sgpe = NULL; /* s/g prot entry */ struct scatterlist *sgpe = NULL; /* s/g prot entry */
struct sli4_sge_diseed *diseed = NULL; struct sli4_sge_diseed *diseed = NULL;
dma_addr_t dataphysaddr, protphysaddr; dma_addr_t dataphysaddr, protphysaddr;
unsigned short curr_data = 0, curr_prot = 0; unsigned short curr_prot = 0;
unsigned int split_offset; unsigned int split_offset;
unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder; unsigned int protgroup_len, protgroup_offset = 0, protgroup_remainder;
unsigned int protgrp_blks, protgrp_bytes; unsigned int protgrp_blks, protgrp_bytes;
...@@ -2364,7 +2363,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc, ...@@ -2364,7 +2363,6 @@ lpfc_bg_setup_sgl_prot(struct lpfc_hba *phba, struct scsi_cmnd *sc,
dma_offset += dma_len; dma_offset += dma_len;
num_sge++; num_sge++;
curr_data++;
if (split_offset) { if (split_offset) {
sgl++; sgl++;
......
...@@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb) ...@@ -11270,6 +11270,30 @@ lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
} }
} }
/*
 * lpfc_sli4_poll_eq - Service an event queue that is in polling mode.
 * @eq: the event queue to poll.
 *
 * Called from the fast-path I/O submission code and from the slow-path
 * polling timer.  If the queue is in LPFC_EQ_POLL mode, its pending
 * entries are processed without re-arming the EQ interrupt.
 */
inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
{
	struct lpfc_hba *hba = eq->phba;

	/*
	 * Unlocking an irq is one of the entry points to check for
	 * re-schedule, but we are good for the io submission path as
	 * the midlayer does a get_cpu to glue us in.  Flush out the
	 * invalidate queue so we can see the updated value for mode.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) != LPFC_EQ_POLL)
		return;

	/* We will not likely get the completion for the caller during
	 * this iteration, but that is fine: future io's coming on this
	 * eq should be able to pick it up.  Single io's are handled
	 * through a sched from the polling timer function, which is
	 * currently triggered every 1 msec.
	 */
	lpfc_sli4_process_eq(hba, eq, LPFC_QUEUE_NOARM);
}
/** /**
* lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
* @phba: Pointer to HBA context object. * @phba: Pointer to HBA context object.
...@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number, ...@@ -11309,7 +11333,7 @@ lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag); rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
spin_unlock_irqrestore(&pring->ring_lock, iflags); spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_sli4_poll_eq(eq, LPFC_POLL_FASTPATH); lpfc_sli4_poll_eq(eq);
} else { } else {
/* For now, SLI2/3 will still use hbalock */ /* For now, SLI2/3 will still use hbalock */
spin_lock_irqsave(&phba->hbalock, iflags); spin_lock_irqsave(&phba->hbalock, iflags);
...@@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t) ...@@ -15625,12 +15649,11 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
{ {
struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer); struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
struct lpfc_queue *eq; struct lpfc_queue *eq;
int i = 0;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list) list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
i += lpfc_sli4_poll_eq(eq, LPFC_POLL_SLOWPATH); lpfc_sli4_poll_eq(eq);
if (!list_empty(&phba->poll_list)) if (!list_empty(&phba->poll_list))
mod_timer(&phba->cpuhp_poll_timer, mod_timer(&phba->cpuhp_poll_timer,
jiffies + msecs_to_jiffies(LPFC_POLL_HB)); jiffies + msecs_to_jiffies(LPFC_POLL_HB));
...@@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t) ...@@ -15638,33 +15661,6 @@ void lpfc_sli4_poll_hbtimer(struct timer_list *t)
rcu_read_unlock(); rcu_read_unlock();
} }
/*
 * lpfc_sli4_poll_eq - Service an event queue that is in polling mode.
 * @eq: the event queue to poll.
 * @path: caller context hint (LPFC_POLL_FASTPATH / LPFC_POLL_SLOWPATH).
 *        NOTE(review): this argument is never read in this routine.
 *
 * Return: number of EQ entries processed, or 0 when the queue is not
 * in LPFC_EQ_POLL mode.
 */
inline int lpfc_sli4_poll_eq(struct lpfc_queue *eq, uint8_t path)
{
	struct lpfc_hba *hba = eq->phba;
	int processed = 0;

	/*
	 * Unlocking an irq is one of the entry points to check for
	 * re-schedule, but we are good for the io submission path as
	 * the midlayer does a get_cpu to glue us in.  Flush out the
	 * invalidate queue so we can see the updated value for mode.
	 */
	smp_rmb();

	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
		/* We will not likely get the completion for the caller
		 * during this iteration, but that is fine: future io's
		 * coming on this eq should be able to pick it up.
		 * Single io's are handled through a sched from the
		 * polling timer function, currently every 1 msec.
		 */
		processed = lpfc_sli4_process_eq(hba, eq, LPFC_QUEUE_NOARM);

	return processed;
}
static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq) static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
{ {
struct lpfc_hba *phba = eq->phba; struct lpfc_hba *phba = eq->phba;
...@@ -21276,7 +21272,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, ...@@ -21276,7 +21272,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags); spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); lpfc_sli4_poll_eq(qp->hba_eq);
return 0; return 0;
} }
...@@ -21298,7 +21294,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, ...@@ -21298,7 +21294,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags); spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); lpfc_sli4_poll_eq(qp->hba_eq);
return 0; return 0;
} }
...@@ -21328,7 +21324,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp, ...@@ -21328,7 +21324,7 @@ lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
lpfc_sli_ringtxcmpl_put(phba, pring, pwqe); lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
spin_unlock_irqrestore(&pring->ring_lock, iflags); spin_unlock_irqrestore(&pring->ring_lock, iflags);
lpfc_sli4_poll_eq(qp->hba_eq, LPFC_POLL_FASTPATH); lpfc_sli4_poll_eq(qp->hba_eq);
return 0; return 0;
} }
return WQE_ERROR; return WQE_ERROR;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment