Commit 80af5a45 authored by Jason Gunthorpe, committed by Joerg Roedel

iommu: Add ops->domain_alloc_sva()

Add a new op that receives the device and the mm_struct for which the SVA
domain should be created. Unlike domain_alloc_paging(), the dev argument is
never NULL here.

This allows drivers to fully initialize the SVA domain and to allocate the
mmu_notifier at allocation time, so the notifier's lifetime follows the
lifetime of the iommu_domain.

Since we have only one call site, upgrade the new op to return ERR_PTR
instead of NULL.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
[Removed smmu3 related changes - Vasant]
Signed-off-by: Vasant Hegde <vasant.hegde@amd.com>
Reviewed-by: Tina Zhang <tina.zhang@intel.com>
Link: https://lore.kernel.org/r/20240418103400.6229-15-vasant.hegde@amd.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 1af95763
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -108,8 +108,8 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 
 	/* Allocate a new domain and set it on device pasid. */
 	domain = iommu_sva_domain_alloc(dev, mm);
-	if (!domain) {
-		ret = -ENOMEM;
+	if (IS_ERR(domain)) {
+		ret = PTR_ERR(domain);
 		goto out_free_handle;
 	}
 
@@ -283,9 +283,15 @@ struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
 	const struct iommu_ops *ops = dev_iommu_ops(dev);
 	struct iommu_domain *domain;
 
-	domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
-	if (!domain)
-		return NULL;
+	if (ops->domain_alloc_sva) {
+		domain = ops->domain_alloc_sva(dev, mm);
+		if (IS_ERR(domain))
+			return domain;
+	} else {
+		domain = ops->domain_alloc(IOMMU_DOMAIN_SVA);
+		if (!domain)
+			return ERR_PTR(-ENOMEM);
+	}
 
 	domain->type = IOMMU_DOMAIN_SVA;
 	mmgrab(mm);
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -518,6 +518,7 @@ static inline int __iommu_copy_struct_from_user_array(
  *           Upon failure, ERR_PTR must be returned.
  * @domain_alloc_paging: Allocate an iommu_domain that can be used for
  *                       UNMANAGED, DMA, and DMA_FQ domain types.
+ * @domain_alloc_sva: Allocate an iommu_domain for Shared Virtual Addressing.
  * @probe_device: Add device to iommu driver handling
  * @release_device: Remove device from iommu driver handling
  * @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -558,6 +559,8 @@ struct iommu_ops {
 			struct device *dev, u32 flags, struct iommu_domain *parent,
 			const struct iommu_user_data *user_data);
 	struct iommu_domain *(*domain_alloc_paging)(struct device *dev);
+	struct iommu_domain *(*domain_alloc_sva)(struct device *dev,
+						  struct mm_struct *mm);
 	struct iommu_device *(*probe_device)(struct device *dev);
 	void (*release_device)(struct device *dev);
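
For illustration, a driver adopting the new op might wire it up roughly as
follows. This is a minimal sketch, not part of the commit: the names
(my_sva_domain, my_sva_mn_ops, my_domain_alloc_sva, my_iommu_ops) are
invented, the notifier callbacks and .set_dev_pasid are omitted, and error
handling is reduced to the essentials. It only illustrates the point made in
the commit message: the mmu_notifier can be embedded in and registered
together with the SVA domain, so its lifetime follows the iommu_domain, and
failures are reported as ERR_PTR rather than NULL.

	/* Hypothetical driver-side sketch, not taken from this commit. */
	#include <linux/err.h>
	#include <linux/iommu.h>
	#include <linux/mmu_notifier.h>
	#include <linux/slab.h>

	struct my_sva_domain {
		struct iommu_domain domain;
		struct mmu_notifier mn;		/* lifetime now follows the domain */
	};

	/* Invalidation callbacks omitted; a real driver fills these in. */
	static const struct mmu_notifier_ops my_sva_mn_ops = {
	};

	static void my_sva_domain_free(struct iommu_domain *domain)
	{
		struct my_sva_domain *sdom =
			container_of(domain, struct my_sva_domain, domain);

		/* Tear the notifier down together with the domain. */
		mmu_notifier_unregister(&sdom->mn, sdom->mn.mm);
		kfree(sdom);
	}

	/* A real SVA driver also provides .set_dev_pasid; omitted here. */
	static const struct iommu_domain_ops my_sva_domain_ops = {
		.free = my_sva_domain_free,
	};

	static struct iommu_domain *my_domain_alloc_sva(struct device *dev,
							struct mm_struct *mm)
	{
		struct my_sva_domain *sdom;
		int ret;

		/* Unlike domain_alloc_paging(), dev is never NULL here. */
		sdom = kzalloc(sizeof(*sdom), GFP_KERNEL);
		if (!sdom)
			return ERR_PTR(-ENOMEM);

		sdom->domain.ops = &my_sva_domain_ops;
		sdom->mn.ops = &my_sva_mn_ops;

		/* Register the notifier against the mm the domain is created for. */
		ret = mmu_notifier_register(&sdom->mn, mm);
		if (ret) {
			kfree(sdom);
			return ERR_PTR(ret);
		}

		/* Return ERR_PTR on failure, never NULL; the core sets domain->type. */
		return &sdom->domain;
	}

	static const struct iommu_ops my_iommu_ops = {
		.domain_alloc_sva = my_domain_alloc_sva,
		/* remaining ops omitted */
	};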