Commit d41a4adb authored by Jiang Liu, committed by Joerg Roedel

iommu/vt-d: Simplify intel_unmap_sg() and kill duplicated code

Introduce intel_unmap() to reduce duplicated code in intel_unmap_sg()
and intel_unmap_page().

Also let dma_pte_free_pagetable() call dma_pte_clear_range() directly,
so callers only need to call dma_pte_free_pagetable().
Signed-off-by: Jiang Liu <jiang.liu@linux.intel.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2a41ccee
...@@ -984,6 +984,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain, ...@@ -984,6 +984,8 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
BUG_ON(start_pfn > last_pfn); BUG_ON(start_pfn > last_pfn);
dma_pte_clear_range(domain, start_pfn, last_pfn);
/* We don't need lock here; nobody else touches the iova range */ /* We don't need lock here; nobody else touches the iova range */
dma_pte_free_level(domain, agaw_to_level(domain->agaw), dma_pte_free_level(domain, agaw_to_level(domain->agaw),
domain->pgd, 0, start_pfn, last_pfn); domain->pgd, 0, start_pfn, last_pfn);
...@@ -2011,12 +2013,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, ...@@ -2011,12 +2013,14 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
/* It is large page*/ /* It is large page*/
if (largepage_lvl > 1) { if (largepage_lvl > 1) {
pteval |= DMA_PTE_LARGE_PAGE; pteval |= DMA_PTE_LARGE_PAGE;
/* Ensure that old small page tables are removed to make room lvl_pages = lvl_to_nr_pages(largepage_lvl);
for superpage, if they exist. */ /*
dma_pte_clear_range(domain, iov_pfn, * Ensure that old small page tables are
iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); * removed to make room for superpage,
* if they exist.
*/
dma_pte_free_pagetable(domain, iov_pfn, dma_pte_free_pagetable(domain, iov_pfn,
iov_pfn + lvl_to_nr_pages(largepage_lvl) - 1); iov_pfn + lvl_pages - 1);
} else { } else {
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE; pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
} }
...@@ -3148,9 +3152,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f ...@@ -3148,9 +3152,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova, struct page *f
spin_unlock_irqrestore(&async_umap_flush_lock, flags); spin_unlock_irqrestore(&async_umap_flush_lock, flags);
} }
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, static void intel_unmap(struct device *dev, dma_addr_t dev_addr)
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{ {
struct dmar_domain *domain; struct dmar_domain *domain;
unsigned long start_pfn, last_pfn; unsigned long start_pfn, last_pfn;
...@@ -3194,6 +3196,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, ...@@ -3194,6 +3196,13 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
} }
} }
/*
 * Thin wrapper delegating to intel_unmap().  The size, dir and attrs
 * parameters are kept to match the expected callback signature
 * (presumably dma_map_ops->unmap_page — confirm against the ops table)
 * but are unused: only dev and dev_addr are forwarded.
 */
static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
intel_unmap(dev, dev_addr);
}
static void *intel_alloc_coherent(struct device *dev, size_t size, static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, dma_addr_t *dma_handle, gfp_t flags,
struct dma_attrs *attrs) struct dma_attrs *attrs)
...@@ -3250,7 +3259,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr, ...@@ -3250,7 +3259,7 @@ static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
size = PAGE_ALIGN(size); size = PAGE_ALIGN(size);
order = get_order(size); order = get_order(size);
intel_unmap_page(dev, dma_handle, size, DMA_BIDIRECTIONAL, NULL); intel_unmap(dev, dma_handle);
if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT)) if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
__free_pages(page, order); __free_pages(page, order);
} }
...@@ -3259,43 +3268,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist, ...@@ -3259,43 +3268,7 @@ static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nelems, enum dma_data_direction dir, int nelems, enum dma_data_direction dir,
struct dma_attrs *attrs) struct dma_attrs *attrs)
{ {
struct dmar_domain *domain; intel_unmap(dev, sglist[0].dma_address);
unsigned long start_pfn, last_pfn;
struct iova *iova;
struct intel_iommu *iommu;
struct page *freelist;
if (iommu_no_mapping(dev))
return;
domain = find_domain(dev);
BUG_ON(!domain);
iommu = domain_get_iommu(domain);
iova = find_iova(&domain->iovad, IOVA_PFN(sglist[0].dma_address));
if (WARN_ONCE(!iova, "Driver unmaps unmatched sglist at PFN %llx\n",
(unsigned long long)sglist[0].dma_address))
return;
start_pfn = mm_to_dma_pfn(iova->pfn_lo);
last_pfn = mm_to_dma_pfn(iova->pfn_hi + 1) - 1;
freelist = domain_unmap(domain, start_pfn, last_pfn);
if (intel_iommu_strict) {
iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
last_pfn - start_pfn + 1, !freelist, 0);
/* free iova */
__free_iova(&domain->iovad, iova);
dma_free_pagelist(freelist);
} else {
add_unmap(domain, iova, freelist);
/*
* queue up the release of the unmap to save the 1/6th of the
* cpu used up by the iotlb flush operation...
*/
}
} }
static int intel_nontranslate_map_sg(struct device *hddev, static int intel_nontranslate_map_sg(struct device *hddev,
...@@ -3359,13 +3332,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele ...@@ -3359,13 +3332,8 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot); ret = domain_sg_mapping(domain, start_vpfn, sglist, size, prot);
if (unlikely(ret)) { if (unlikely(ret)) {
/* clear the page */
dma_pte_clear_range(domain, start_vpfn,
start_vpfn + size - 1);
/* free page tables */
dma_pte_free_pagetable(domain, start_vpfn, dma_pte_free_pagetable(domain, start_vpfn,
start_vpfn + size - 1); start_vpfn + size - 1);
/* free iova */
__free_iova(&domain->iovad, iova); __free_iova(&domain->iovad, iova);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment