Commit a4c21acc authored by James Smart, committed by Martin K. Petersen

scsi: lpfc: Fix hdwq sgl locks and irq handling

Many of the sgl-per-hdwq paths are locking with spin_lock_irq() and
spin_unlock_irq() and may unwittingly re-enable interrupts when they
should remain disabled. Hard deadlocks were seen around
lpfc_scsi_prep_cmnd().

Fix by converting the locks to irqsave/irqrestore.

Fixes: d79c9e9d ("scsi: lpfc: Support dynamic unbounded SGL lists on G7 hardware.")
Link: https://lore.kernel.org/r/20190922035906.10977-16-jsmart2021@gmail.com
Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
Signed-off-by: James Smart <jsmart2021@gmail.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent d38b4a52
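For context, the distinction between the two locking primitives is the crux of the fix: spin_unlock_irq() unconditionally re-enables local interrupts, so if one of these per-hdwq pool helpers is called from a context that already has interrupts disabled (as in the I/O submission path around lpfc_scsi_prep_cmnd()), the unlock turns interrupts back on behind the caller's back. The irqsave/irqrestore variants save the caller's interrupt state and restore it exactly. Below is a minimal sketch of the pattern; it is generic kernel code, not taken from lpfc, and pool_lock and the two helper names are hypothetical:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(pool_lock);

    /* Unsafe when the caller may already have interrupts disabled:
     * spin_unlock_irq() re-enables interrupts unconditionally.
     */
    static void pool_op_unsafe(void)
    {
            spin_lock_irq(&pool_lock);
            /* ... manipulate the pool ... */
            spin_unlock_irq(&pool_lock); /* irqs now ON, whatever the caller had */
    }

    /* Safe from any context: the caller's interrupt state is saved
     * when the lock is taken and restored as-is when it is released.
     */
    static void pool_op_safe(void)
    {
            unsigned long iflags;

            spin_lock_irqsave(&pool_lock, iflags);
            /* ... manipulate the pool ... */
            spin_unlock_irqrestore(&pool_lock, iflags);
    }

This is why the patch both declares an iflags variable in each helper and converts every lock/unlock pair, including the drop-and-reacquire around the GFP_ATOMIC allocations.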
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -20444,8 +20444,9 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 	struct sli4_hybrid_sgl *allocated_sgl = NULL;
 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
 	struct list_head *buf_list = &hdwq->sgl_list;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	if (likely(!list_empty(buf_list))) {
 		/* break off 1 chunk from the sgl_list */
@@ -20457,7 +20458,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 		}
 	} else {
 		/* allocate more */
-		spin_unlock_irq(&hdwq->hdwq_lock);
+		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
 				   cpu_to_node(smp_processor_id()));
 		if (!tmp) {
@@ -20479,7 +20480,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 			return NULL;
 		}
 
-		spin_lock_irq(&hdwq->hdwq_lock);
+		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
 	}
 
@@ -20487,7 +20488,7 @@ lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 					struct sli4_hybrid_sgl,
 					list_node);
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 
 	return allocated_sgl;
 }
@@ -20511,8 +20512,9 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 	struct sli4_hybrid_sgl *tmp = NULL;
 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
 	struct list_head *buf_list = &hdwq->sgl_list;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
 		list_for_each_entry_safe(list_entry, tmp,
@@ -20525,7 +20527,7 @@ lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
 		rc = -EINVAL;
 	}
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 	return rc;
 }
 
@@ -20546,8 +20548,9 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
 	struct list_head *buf_list = &hdwq->sgl_list;
 	struct sli4_hybrid_sgl *list_entry = NULL;
 	struct sli4_hybrid_sgl *tmp = NULL;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	/* Free sgl pool */
 	list_for_each_entry_safe(list_entry, tmp,
@@ -20559,7 +20562,7 @@ lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
 		kfree(list_entry);
 	}
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }
 
 /**
@@ -20583,8 +20586,9 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	if (likely(!list_empty(buf_list))) {
 		/* break off 1 chunk from the list */
@@ -20597,7 +20601,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 		}
 	} else {
 		/* allocate more */
-		spin_unlock_irq(&hdwq->hdwq_lock);
+		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
 				   cpu_to_node(smp_processor_id()));
 		if (!tmp) {
@@ -20624,7 +20628,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
 				sizeof(struct fcp_cmnd));
 
-		spin_lock_irq(&hdwq->hdwq_lock);
+		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
 	}
 
@@ -20632,7 +20636,7 @@ lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 					struct fcp_cmd_rsp_buf,
 					list_node);
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 
 	return allocated_buf;
 }
@@ -20657,8 +20661,9 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 	struct fcp_cmd_rsp_buf *tmp = NULL;
 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
 		list_for_each_entry_safe(list_entry, tmp,
@@ -20671,7 +20676,7 @@ lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 		rc = -EINVAL;
 	}
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 	return rc;
 }
 
@@ -20692,8 +20697,9 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
 	struct fcp_cmd_rsp_buf *list_entry = NULL;
 	struct fcp_cmd_rsp_buf *tmp = NULL;
+	unsigned long iflags;
 
-	spin_lock_irq(&hdwq->hdwq_lock);
+	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
 
 	/* Free cmd_rsp buf pool */
 	list_for_each_entry_safe(list_entry, tmp,
@@ -20706,5 +20712,5 @@ lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
 		kfree(list_entry);
 	}
 
-	spin_unlock_irq(&hdwq->hdwq_lock);
+	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
 }