Commit bd111e98 authored by Robin Murphy, committed by Joerg Roedel

iommu: Retire map/unmap ops

With everyone now implementing the new interfaces, clean up the last
remnants of the old map/unmap ops and simplify the calling logic again.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/d2afdf13b2fbf537713c3ec642dfd49d16dd9e6a.1694525662.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 39f823df
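With ->map and ->unmap gone, every driver must now provide ->map_pages and ->unmap_pages. For a driver that previously only had a single-page ->map callback, the conversion is mechanical: ->map_pages is the per-page operation applied pgcount times, reporting partial progress through *mapped so the core can unwind on error. A minimal sketch of such a wrapper (not from this patch), assuming a hypothetical per-page helper my_iommu_map_page() in an example driver:

    #include <linux/iommu.h>

    /* Hypothetical per-page mapper; stands in for whatever the driver's old ->map did. */
    static int my_iommu_map_page(struct iommu_domain *domain, unsigned long iova,
                                 phys_addr_t paddr, size_t pgsize, int prot, gfp_t gfp);

    static int my_iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
                                  phys_addr_t paddr, size_t pgsize, size_t pgcount,
                                  int prot, gfp_t gfp, size_t *mapped)
    {
            size_t i;
            int ret = 0;

            for (i = 0; i < pgcount; i++) {
                    ret = my_iommu_map_page(domain, iova, paddr, pgsize, prot, gfp);
                    if (ret)
                            break;
                    iova += pgsize;
                    paddr += pgsize;
            }
            /* Report how much was mapped so the core can unmap it again on failure. */
            *mapped = i * pgsize;
            return ret;
    }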
@@ -2509,30 +2509,6 @@ static size_t iommu_pgsize(struct iommu_domain *domain, unsigned long iova,
 	return pgsize;
 }
 
-static int __iommu_map_pages(struct iommu_domain *domain, unsigned long iova,
-			     phys_addr_t paddr, size_t size, int prot,
-			     gfp_t gfp, size_t *mapped)
-{
-	const struct iommu_domain_ops *ops = domain->ops;
-	size_t pgsize, count;
-	int ret;
-
-	pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
-
-	pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
-		 iova, &paddr, pgsize, count);
-
-	if (ops->map_pages) {
-		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
-				     gfp, mapped);
-	} else {
-		ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);
-		*mapped = ret ? 0 : pgsize;
-	}
-
-	return ret;
-}
-
 static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
 {
@@ -2543,8 +2519,7 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	phys_addr_t orig_paddr = paddr;
 	int ret = 0;
 
-	if (unlikely(!(ops->map || ops->map_pages) ||
-		     domain->pgsize_bitmap == 0UL))
+	if (unlikely(!ops->map_pages || domain->pgsize_bitmap == 0UL))
 		return -ENODEV;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
@@ -2567,10 +2542,14 @@ static int __iommu_map(struct iommu_domain *domain, unsigned long iova,
 	pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);
 
 	while (size) {
-		size_t mapped = 0;
+		size_t pgsize, count, mapped = 0;
+
+		pgsize = iommu_pgsize(domain, iova, paddr, size, &count);
 
-		ret = __iommu_map_pages(domain, iova, paddr, size, prot, gfp,
-					&mapped);
+		pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx count %zu\n",
+			 iova, &paddr, pgsize, count);
+
+		ret = ops->map_pages(domain, iova, paddr, pgsize, count, prot,
+				     gfp, &mapped);
 		/*
 		 * Some pages may have been mapped, even if an error occurred,
 		 * so we should account for those so they can be unmapped.
@@ -2614,19 +2593,6 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova,
 }
 EXPORT_SYMBOL_GPL(iommu_map);
 
-static size_t __iommu_unmap_pages(struct iommu_domain *domain,
-				  unsigned long iova, size_t size,
-				  struct iommu_iotlb_gather *iotlb_gather)
-{
-	const struct iommu_domain_ops *ops = domain->ops;
-	size_t pgsize, count;
-
-	pgsize = iommu_pgsize(domain, iova, iova, size, &count);
-	return ops->unmap_pages ?
-	       ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather) :
-	       ops->unmap(domain, iova, pgsize, iotlb_gather);
-}
-
 static size_t __iommu_unmap(struct iommu_domain *domain,
 			    unsigned long iova, size_t size,
 			    struct iommu_iotlb_gather *iotlb_gather)
@@ -2636,8 +2602,7 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	unsigned long orig_iova = iova;
 	unsigned int min_pagesz;
 
-	if (unlikely(!(ops->unmap || ops->unmap_pages) ||
-		     domain->pgsize_bitmap == 0UL))
+	if (unlikely(!ops->unmap_pages || domain->pgsize_bitmap == 0UL))
 		return 0;
 
 	if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
@@ -2664,9 +2629,10 @@ static size_t __iommu_unmap(struct iommu_domain *domain,
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		unmapped_page = __iommu_unmap_pages(domain, iova,
-						    size - unmapped,
-						    iotlb_gather);
+		size_t pgsize, count;
+
+		pgsize = iommu_pgsize(domain, iova, iova, size - unmapped, &count);
+		unmapped_page = ops->unmap_pages(domain, iova, pgsize, count, iotlb_gather);
 		if (!unmapped_page)
 			break;
......
@@ -322,10 +322,8 @@ struct iommu_ops {
  * * ENODEV	- device specific errors, not able to be attached
  * * <others>	- treated as ENODEV by the caller. Use is discouraged
  * @set_dev_pasid: set an iommu domain to a pasid of device
- * @map: map a physically contiguous memory region to an iommu domain
  * @map_pages: map a physically contiguous set of pages of the same size to
  *             an iommu domain.
- * @unmap: unmap a physically contiguous memory region from an iommu domain
  * @unmap_pages: unmap a number of pages of the same size from an iommu domain
  * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain
  * @iotlb_sync_map: Sync mappings created recently using @map to the hardware
@@ -344,13 +342,9 @@ struct iommu_domain_ops {
 	int (*set_dev_pasid)(struct iommu_domain *domain, struct device *dev,
 			     ioasid_t pasid);
 
-	int (*map)(struct iommu_domain *domain, unsigned long iova,
-		   phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
 	int (*map_pages)(struct iommu_domain *domain, unsigned long iova,
 			 phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			 int prot, gfp_t gfp, size_t *mapped);
-	size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
-			size_t size, struct iommu_iotlb_gather *iotlb_gather);
 	size_t (*unmap_pages)(struct iommu_domain *domain, unsigned long iova,
 			      size_t pgsize, size_t pgcount,
 			      struct iommu_iotlb_gather *iotlb_gather);
......
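On the calling side nothing changes for users of the public API: __iommu_map() and __iommu_unmap() still pick the largest (pgsize, pgcount) chunk via iommu_pgsize() and now hand it straight to ->map_pages()/->unmap_pages(). A hedged usage sketch, assuming a caller-provided domain, iova and paddr and a domain whose pgsize_bitmap advertises 4K and 2M pages:

    #include <linux/iommu.h>
    #include <linux/sizes.h>

    /*
     * Hypothetical example, not from the patch: map an 8 MiB, 2 MiB-aligned
     * region. With pgsize_bitmap = SZ_4K | SZ_2M, iommu_pgsize() selects
     * pgsize = SZ_2M and count = 4, so __iommu_map() issues a single
     * ops->map_pages(domain, iova, paddr, SZ_2M, 4, ...) call, whereas the
     * retired ->map callback could only take one page per call.
     */
    static int example_map_region(struct iommu_domain *domain, unsigned long iova,
                                  phys_addr_t paddr)
    {
            return iommu_map(domain, iova, paddr, SZ_8M,
                             IOMMU_READ | IOMMU_WRITE, GFP_KERNEL);
    }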