Commit f23f5bec authored by Keith Busch, committed by Jens Axboe

blk-mq: Allow PCI vector offset for mapping queues

The PCI interrupt vectors intended to be associated with a queue may
not start at 0; a driver may allocate pre_vectors for special use. This
patch adds an offset parameter so blk-mq may find the intended affinity
mask and updates all drivers using this API accordingly.

Cc: Don Brace <don.brace@microsemi.com>
Cc: <qla2xxx-upstream@qlogic.com>
Cc: <linux-scsi@vger.kernel.org>
Signed-off-by: Keith Busch <keith.busch@intel.com>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 3148ffbd
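The intended consumer is a driver that allocates pre_vectors, as the message above describes. Below is a minimal sketch of such a driver against the post-patch API; every foo_* identifier and FOO_ADMIN_VECS is invented for illustration and is not part of this patch:

/*
 * Hypothetical driver sketch, not part of this patch: vector 0 is
 * reserved for an admin interrupt via irq_affinity.pre_vectors, so
 * the vectors backing the hw queues start at 1.
 */
#include <linux/blk-mq.h>
#include <linux/blk-mq-pci.h>
#include <linux/interrupt.h>
#include <linux/pci.h>

#define FOO_ADMIN_VECS	1	/* vector 0: admin completions, no hw queue */

struct foo_dev {
	struct pci_dev		*pdev;
	struct blk_mq_tag_set	tag_set;
};

static int foo_alloc_irqs(struct foo_dev *foo, unsigned int nr_io_queues)
{
	struct irq_affinity affd = {
		.pre_vectors = FOO_ADMIN_VECS,	/* excluded from the spread */
	};

	/* One admin vector plus one affinity-spread vector per I/O queue. */
	return pci_alloc_irq_vectors_affinity(foo->pdev, FOO_ADMIN_VECS + 1,
					      FOO_ADMIN_VECS + nr_io_queues,
					      PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
					      &affd);
}

/* blk_mq_ops.map_queues: queue 0 now maps to the mask of vector 1. */
static int foo_map_queues(struct blk_mq_tag_set *set)
{
	struct foo_dev *foo = set->driver_data;

	return blk_mq_pci_map_queues(set, foo->pdev, FOO_ADMIN_VECS);
}

Note that all callers converted by this patch pass an offset of 0, so their existing vector-to-queue mappings are unchanged; only a driver that reserves pre_vectors needs a nonzero offset.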
--- a/block/blk-mq-pci.c
+++ b/block/blk-mq-pci.c
@@ -21,6 +21,7 @@
  * blk_mq_pci_map_queues - provide a default queue mapping for PCI device
  * @set:	tagset to provide the mapping for
  * @pdev:	PCI device associated with @set.
+ * @offset:	Offset to use for the pci irq vector
  *
  * This function assumes the PCI device @pdev has at least as many available
  * interrupt vectors as @set has queues. It will then query the vector
@@ -28,13 +29,14 @@
  * that maps a queue to the CPUs that have irq affinity for the corresponding
  * vector.
  */
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev)
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+			  int offset)
 {
 	const struct cpumask *mask;
 	unsigned int queue, cpu;
 
 	for (queue = 0; queue < set->nr_hw_queues; queue++) {
-		mask = pci_irq_get_affinity(pdev, queue);
+		mask = pci_irq_get_affinity(pdev, queue + offset);
 		if (!mask)
 			goto fallback;
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -414,7 +414,7 @@ static int nvme_pci_map_queues(struct blk_mq_tag_set *set)
 {
 	struct nvme_dev *dev = set->driver_data;
 
-	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev));
+	return blk_mq_pci_map_queues(set, to_pci_dev(dev->dev), 0);
 }
 
 /**
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -6805,7 +6805,7 @@ static int qla2xxx_map_queues(struct Scsi_Host *shost)
 	if (USER_CTRL_IRQ(vha->hw))
 		rc = blk_mq_map_queues(&shost->tag_set);
 	else
-		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev);
+		rc = blk_mq_pci_map_queues(&shost->tag_set, vha->hw->pdev, 0);
 	return rc;
 }
--- a/drivers/scsi/smartpqi/smartpqi_init.c
+++ b/drivers/scsi/smartpqi/smartpqi_init.c
@@ -5348,7 +5348,7 @@ static int pqi_map_queues(struct Scsi_Host *shost)
 {
 	struct pqi_ctrl_info *ctrl_info = shost_to_hba(shost);
 
-	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev);
+	return blk_mq_pci_map_queues(&shost->tag_set, ctrl_info->pci_dev, 0);
 }
 
 static int pqi_getpciinfo_ioctl(struct pqi_ctrl_info *ctrl_info,
--- a/include/linux/blk-mq-pci.h
+++ b/include/linux/blk-mq-pci.h
@@ -5,6 +5,7 @@
 struct blk_mq_tag_set;
 struct pci_dev;
 
-int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev);
+int blk_mq_pci_map_queues(struct blk_mq_tag_set *set, struct pci_dev *pdev,
+			  int offset);
 
 #endif /* _LINUX_BLK_MQ_PCI_H */