Commit 61928bab authored by Suravee Suthikulpanit's avatar Suravee Suthikulpanit Committed by Joerg Roedel

iommu/amd: Define per-IOMMU iopf_queue

AMD IOMMU hardware supports PCI Peripheral Paging Request (PPR) using
a PPR log, which is a circular buffer containing requests from downstream
end-point devices.

There is one PPR log per IOMMU instance. Therefore, allocate an iopf_queue
per IOMMU instance during driver initialization, and free the queue during
driver deinitialization.

Also rename enable_iommus_v2() -> enable_iommus_ppr() to reflect its
usage. And add amd_iommu_gt_ppr_supported() check before enabling PPR
log.
Signed-off-by: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
Co-developed-by: Vasant Hegde <vasant.hegde@amd.com>
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/20240418103400.6229-10-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 25efbb05
...@@ -46,6 +46,10 @@ extern int amd_iommu_gpt_level; ...@@ -46,6 +46,10 @@ extern int amd_iommu_gpt_level;
bool amd_iommu_pasid_supported(void); bool amd_iommu_pasid_supported(void);
/* IOPF */
int amd_iommu_iopf_init(struct amd_iommu *iommu);
void amd_iommu_iopf_uninit(struct amd_iommu *iommu);
/* GCR3 setup */ /* GCR3 setup */
int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data, int amd_iommu_set_gcr3(struct iommu_dev_data *dev_data,
ioasid_t pasid, unsigned long gcr3); ioasid_t pasid, unsigned long gcr3);
......
...@@ -762,6 +762,10 @@ struct amd_iommu { ...@@ -762,6 +762,10 @@ struct amd_iommu {
/* DebugFS Info */ /* DebugFS Info */
struct dentry *debugfs; struct dentry *debugfs;
#endif #endif
/* IOPF support */
struct iopf_queue *iopf_queue;
unsigned char iopfq_name[32];
}; };
static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev) static inline struct amd_iommu *dev_to_amd_iommu(struct device *dev)
......
...@@ -1639,6 +1639,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu) ...@@ -1639,6 +1639,7 @@ static void __init free_iommu_one(struct amd_iommu *iommu)
amd_iommu_free_ppr_log(iommu); amd_iommu_free_ppr_log(iommu);
free_ga_log(iommu); free_ga_log(iommu);
iommu_unmap_mmio_space(iommu); iommu_unmap_mmio_space(iommu);
amd_iommu_iopf_uninit(iommu);
} }
static void __init free_iommu_all(void) static void __init free_iommu_all(void)
...@@ -2108,6 +2109,16 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) ...@@ -2108,6 +2109,16 @@ static int __init iommu_init_pci(struct amd_iommu *iommu)
if (ret) if (ret)
return ret; return ret;
/*
* Allocate per IOMMU IOPF queue here so that in attach device path,
* PRI capable device can be added to IOPF queue
*/
if (amd_iommu_gt_ppr_supported()) {
ret = amd_iommu_iopf_init(iommu);
if (ret)
return ret;
}
iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL); iommu_device_register(&iommu->iommu, &amd_iommu_ops, NULL);
return pci_enable_device(iommu->dev); return pci_enable_device(iommu->dev);
...@@ -2793,10 +2804,13 @@ static void early_enable_iommus(void) ...@@ -2793,10 +2804,13 @@ static void early_enable_iommus(void)
} }
} }
static void enable_iommus_v2(void) static void enable_iommus_ppr(void)
{ {
struct amd_iommu *iommu; struct amd_iommu *iommu;
if (!amd_iommu_gt_ppr_supported())
return;
for_each_iommu(iommu) for_each_iommu(iommu)
amd_iommu_enable_ppr_log(iommu); amd_iommu_enable_ppr_log(iommu);
} }
...@@ -3134,7 +3148,7 @@ static int amd_iommu_enable_interrupts(void) ...@@ -3134,7 +3148,7 @@ static int amd_iommu_enable_interrupts(void)
* PPR and GA log interrupt for all IOMMUs. * PPR and GA log interrupt for all IOMMUs.
*/ */
enable_iommus_vapic(); enable_iommus_vapic();
enable_iommus_v2(); enable_iommus_ppr();
out: out:
return ret; return ret;
......
...@@ -108,3 +108,34 @@ void amd_iommu_poll_ppr_log(struct amd_iommu *iommu) ...@@ -108,3 +108,34 @@ void amd_iommu_poll_ppr_log(struct amd_iommu *iommu)
/* TODO: PPR Handler will be added when we add IOPF support */ /* TODO: PPR Handler will be added when we add IOPF support */
} }
} }
/**************************************************************
*
* IOPF handling stuff
*/
/* Setup per-IOMMU IOPF queue if not exist. */
int amd_iommu_iopf_init(struct amd_iommu *iommu)
{
int ret = 0;
if (iommu->iopf_queue)
return ret;
snprintf(iommu->iopfq_name, sizeof(iommu->iopfq_name),
"amdiommu-%#x-iopfq",
PCI_SEG_DEVID_TO_SBDF(iommu->pci_seg->id, iommu->devid));
iommu->iopf_queue = iopf_queue_alloc(iommu->iopfq_name);
if (!iommu->iopf_queue)
ret = -ENOMEM;
return ret;
}
/* Destroy per-IOMMU IOPF queue if no longer needed. */
void amd_iommu_iopf_uninit(struct amd_iommu *iommu)
{
iopf_queue_free(iommu->iopf_queue);
iommu->iopf_queue = NULL;
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment