Commit 1e9d90db authored by Nicolin Chen, committed by Christoph Hellwig

dma-mapping: introduce dma_get_seg_boundary_nr_pages()

We found that callers of dma_get_seg_boundary() mostly do an ALIGN()
with the page mask and then a page shift to get the number of pages:
    ALIGN(boundary + 1, 1 << shift) >> shift

However, the boundary might be as large as ULONG_MAX, which means
that the device has no specific boundary limit. In that case either
the "+ 1" itself or the ALIGN() it is passed to can overflow.

According to kernel defines:
    #define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
    #define ALIGN(x, a)	ALIGN_MASK(x, (typeof(x))(a) - 1)

We can simplify the logic here into a helper function doing:
  ALIGN(boundary + 1, 1 << shift) >> shift
= ALIGN_MASK(b + 1, (1 << s) - 1) >> s
= {[b + 1 + (1 << s) - 1] & ~[(1 << s) - 1]} >> s
= [b + 1 + (1 << s) - 1] >> s
= [b + (1 << s)] >> s
= (b >> s) + 1

This patch introduces and applies dma_get_seg_boundary_nr_pages()
as an overflow-free helper for dma_get_seg_boundary() callers to
get the number of pages. It also handles the NULL dev case for
non-DMA API callers.
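
To make the overflow concrete, here is a minimal userspace sketch (not
part of the patch; ALIGN()/ALIGN_MASK() are copied from the kernel
defines quoted above, and PAGE_SHIFT is assumed to be 12) comparing the
old and new computations for the ULONG_MAX "no boundary" case:

    #include <stdio.h>
    #include <limits.h>

    #define ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
    #define ALIGN(x, a)         ALIGN_MASK(x, (typeof(x))(a) - 1)

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long boundary = ULONG_MAX;  /* no boundary limit */

            /* Old pattern: "boundary + 1" wraps to 0, so ALIGN() yields 0 pages. */
            unsigned long old = ALIGN(boundary + 1, 1UL << PAGE_SHIFT) >> PAGE_SHIFT;

            /* New pattern: shift first, then add 1 -- cannot wrap. */
            unsigned long new = (boundary >> PAGE_SHIFT) + 1;

            printf("old: %lu pages\n", old);  /* 0 (wrong) */
            printf("new: %lu pages\n", new);  /* 1UL << 52 on 64-bit (correct) */
            return 0;
    }

Compiled with gcc (typeof is a GNU extension), the old expression
prints 0 while the new one prints the expected 2^52 pages.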
Suggested-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicolin Chen <nicoleotsuka@gmail.com>
Acked-by: Niklas Schnelle <schnelle@linux.ibm.com>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 2281f797
@@ -141,12 +141,7 @@ iommu_arena_find_pages(struct device *dev, struct pci_iommu_arena *arena,
 	unsigned long boundary_size;
 
 	base = arena->dma_base >> PAGE_SHIFT;
-	if (dev) {
-		boundary_size = dma_get_seg_boundary(dev) + 1;
-		boundary_size >>= PAGE_SHIFT;
-	} else {
-		boundary_size = 1UL << (32 - PAGE_SHIFT);
-	}
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
 
 	/* Search forward for the first mask-aligned sequence of N free ptes */
 	ptes = arena->ptes;
......
@@ -485,8 +485,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
 	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
 	ASSERT(res_ptr < res_end);
 
-	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
-	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, iovp_shift);
 
 	BUG_ON(ioc->ibase & ~iovp_mask);
 	shift = ioc->ibase >> iovp_shift;
......
@@ -172,7 +172,6 @@ static unsigned long iommu_range_alloc(struct device *dev,
 	int largealloc = npages > 15;
 	int pass = 0;
 	unsigned long align_mask;
-	unsigned long boundary_size;
 	unsigned long flags;
 	unsigned int pool_nr;
 	struct iommu_pool *pool;
@@ -236,15 +235,9 @@ static unsigned long iommu_range_alloc(struct device *dev,
 		}
 	}
 
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << tbl->it_page_shift);
-	else
-		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
-	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
-
 	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
-			     boundary_size >> tbl->it_page_shift, align_mask);
+			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
+			align_mask);
 	if (n == -1) {
 		if (likely(pass == 0)) {
 			/* First try the pool from the start */
......
@@ -261,13 +261,11 @@ static unsigned long __dma_alloc_iommu(struct device *dev,
 				       unsigned long start, int size)
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
-	unsigned long boundary_size;
 
-	boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-			      PAGE_SIZE) >> PAGE_SHIFT;
 	return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
 				start, size, zdev->start_dma >> PAGE_SHIFT,
-				boundary_size, 0);
+				dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
+				0);
 }
 
 static dma_addr_t dma_alloc_address(struct device *dev, int size)
......
@@ -166,13 +166,6 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 		}
 	}
 
-	if (dev)
-		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				      1 << iommu->table_shift);
-	else
-		boundary_size = ALIGN(1ULL << 32, 1 << iommu->table_shift);
-
-	boundary_size = boundary_size >> iommu->table_shift;
 	/*
 	 * if the skip_span_boundary_check had been set during init, we set
 	 * things up so that iommu_is_span_boundary() merely checks if the
@@ -181,6 +174,9 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
 	if ((iommu->flags & IOMMU_NO_SPAN_BOUND) != 0) {
 		shift = 0;
 		boundary_size = iommu->poolsize * iommu->nr_pools;
+	} else {
+		boundary_size = dma_get_seg_boundary_nr_pages(dev,
+					iommu->table_shift);
 	}
 	n = iommu_area_alloc(iommu->map, limit, start, npages, shift,
 			     boundary_size, align_mask);
......
@@ -472,8 +472,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
-	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
 		unsigned long paddr, npages, entry, out_entry = 0, slen;
......
@@ -508,8 +508,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
-	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
-				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
 
 	mask = *dev->dma_mask;
 	if (!iommu_use_atu(iommu, mask))
......
@@ -96,8 +96,7 @@ static unsigned long alloc_iommu(struct device *dev, int size,
 
 	base_index = ALIGN(iommu_bus_base & dma_get_seg_boundary(dev),
 			   PAGE_SIZE) >> PAGE_SHIFT;
-	boundary_size = ALIGN((u64)dma_get_seg_boundary(dev) + 1,
-			      PAGE_SIZE) >> PAGE_SHIFT;
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT);
 
 	spin_lock_irqsave(&iommu_bitmap_lock, flags);
 	offset = iommu_area_alloc(iommu_gart_bitmap, iommu_pages, next_bit,
......
@@ -356,8 +356,7 @@ ccio_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 	** ggg sacrifices another 710 to the computer gods.
 	*/
 
-	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
-			1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);
 
 	if (pages_needed <= 8) {
 		/*
......
@@ -342,8 +342,7 @@ sba_search_bitmap(struct ioc *ioc, struct device *dev,
 	unsigned long shift;
 	int ret;
 
-	boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
-			1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
+	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);
 
 #if defined(ZX1_SUPPORT)
 	BUG_ON(ioc->ibase & ~IOVP_MASK);
......
@@ -632,6 +632,25 @@ static inline unsigned long dma_get_seg_boundary(struct device *dev)
 	return DMA_BIT_MASK(32);
 }
 
+/**
+ * dma_get_seg_boundary_nr_pages - return the segment boundary in "page" units
+ * @dev: device to query the boundary for
+ * @page_shift: ilog() of the IOMMU page size
+ *
+ * Return the segment boundary in IOMMU page units (which may be different from
+ * the CPU page size) for the passed in device.
+ *
+ * If @dev is NULL a boundary of U32_MAX is assumed, this case is just for
+ * non-DMA API callers.
+ */
+static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev,
+		unsigned int page_shift)
+{
+	if (!dev)
+		return (U32_MAX >> page_shift) + 1;
+	return (dma_get_seg_boundary(dev) >> page_shift) + 1;
+}
+
 static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
 {
 	if (dev->dma_parms) {
......