Commit 17f61abb authored by Sakari Ailus, committed by Mauro Carvalho Chehab

media: staging: imgu: Drop support for other page sizes

The hardware only supports 4 KiB pages; drop support for other sizes.
Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
Tested-by: Rajmohan Mani <rajmohan.mani@intel.com>
Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
parent 3efcbe3e
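Before the diff, a quick sanity check of the arithmetic the change relies on (a stand-alone C sketch, not part of the patch): a page shift of 12 bits yields exactly the 4 KiB page size the IMGU hardware supports, which is what the IPU3_PAGE_SHIFT/IPU3_PAGE_SIZE macros below encode.

#include <stdio.h>

#define IPU3_PAGE_SHIFT	12
#define IPU3_PAGE_SIZE	(1UL << IPU3_PAGE_SHIFT)

int main(void)
{
	/* 1UL << 12 == 4096 bytes == 4 KiB */
	printf("IPU3 page size: %lu bytes\n", IPU3_PAGE_SIZE);
	return 0;
}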
@@ -31,12 +31,11 @@ static void imgu_dmamap_free_buffer(struct page **pages,
  * Based on the implementation of __iommu_dma_alloc_pages()
  * defined in drivers/iommu/dma-iommu.c
  */
-static struct page **imgu_dmamap_alloc_buffer(size_t size,
-					      unsigned long order_mask,
-					      gfp_t gfp)
+static struct page **imgu_dmamap_alloc_buffer(size_t size, gfp_t gfp)
 {
 	struct page **pages;
 	unsigned int i = 0, count = size >> PAGE_SHIFT;
+	unsigned int order_mask = 1;
 	const gfp_t high_order_gfp = __GFP_NOWARN | __GFP_NORETRY;
 
 	/* Allocate mem for array of page ptrs */
@@ -45,10 +44,6 @@ static struct page **imgu_dmamap_alloc_buffer(size_t size,
 	if (!pages)
 		return NULL;
 
-	order_mask &= (2U << MAX_ORDER) - 1;
-	if (!order_mask)
-		return NULL;
-
 	gfp |= __GFP_HIGHMEM | __GFP_ZERO;
 
 	while (count) {
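The dropped MAX_ORDER clamp becomes a no-op once order_mask is the constant 1, since bit 0 always survives the mask. A stand-alone sketch of the dead check (the MAX_ORDER value here is an assumption for illustration):

#include <stdio.h>

int main(void)
{
	unsigned int order_mask = 1;	/* only order-0, i.e. single pages */
	unsigned int max_order = 11;	/* assumed MAX_ORDER for the demo */

	/* The removed clamp keeps bits 0..MAX_ORDER, so bit 0 survives. */
	unsigned int clamped = order_mask & ((2U << max_order) - 1);

	printf("order_mask %u -> clamped %u (never zero)\n",
	       order_mask, clamped);
	return 0;
}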
@@ -99,7 +94,6 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
 		       size_t len)
 {
 	unsigned long shift = iova_shift(&imgu->iova_domain);
-	unsigned int alloc_sizes = imgu->mmu->pgsize_bitmap;
 	struct device *dev = &imgu->pci_dev->dev;
 	size_t size = PAGE_ALIGN(len);
 	struct page **pages;
@@ -114,8 +108,7 @@ void *imgu_dmamap_alloc(struct imgu_device *imgu, struct imgu_css_map *map,
 	if (!iova)
 		return NULL;
 
-	pages = imgu_dmamap_alloc_buffer(size, alloc_sizes >> PAGE_SHIFT,
-					 GFP_KERNEL);
+	pages = imgu_dmamap_alloc_buffer(size, GFP_KERNEL);
 	if (!pages)
 		goto out_free_iova;
@@ -257,7 +250,7 @@ int imgu_dmamap_init(struct imgu_device *imgu)
 	if (ret)
 		return ret;
 
-	order = __ffs(imgu->mmu->pgsize_bitmap);
+	order = __ffs(IPU3_PAGE_SIZE);
 	base_pfn = max_t(unsigned long, 1, imgu->mmu->aperture_start >> order);
 	init_iova_domain(&imgu->iova_domain, 1UL << order, base_pfn);
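The IOVA domain granule is unchanged by this substitution; a stand-alone check (using __builtin_ctzl as a user-space stand-in for the kernel's __ffs()) shows that __ffs(IPU3_PAGE_SIZE) is simply IPU3_PAGE_SHIFT:

#include <stdio.h>

#define IPU3_PAGE_SHIFT	12
#define IPU3_PAGE_SIZE	(1UL << IPU3_PAGE_SHIFT)

int main(void)
{
	/* index of the lowest set bit, as the kernel's __ffs() computes */
	unsigned int order = __builtin_ctzl(IPU3_PAGE_SIZE);

	printf("order %u, granule %lu bytes\n", order, 1UL << order);
	return 0;
}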
......
@@ -20,9 +20,6 @@
 #include "ipu3-mmu.h"
 
-#define IPU3_PAGE_SHIFT		12
-#define IPU3_PAGE_SIZE		(1UL << IPU3_PAGE_SHIFT)
-
 #define IPU3_PT_BITS		10
 #define IPU3_PT_PTES		(1UL << IPU3_PT_BITS)
 #define IPU3_PT_SIZE		(IPU3_PT_PTES << 2)
@@ -238,43 +235,6 @@ static int __imgu_mmu_map(struct imgu_mmu *mmu, unsigned long iova,
 	return 0;
 }
 
-/*
- * The following four functions are implemented based on iommu.c
- * drivers/iommu/iommu.c/iommu_pgsize().
- */
-static size_t imgu_mmu_pgsize(unsigned long pgsize_bitmap,
-			      unsigned long addr_merge, size_t size)
-{
-	unsigned int pgsize_idx;
-	size_t pgsize;
-
-	/* Max page size that still fits into 'size' */
-	pgsize_idx = __fls(size);
-
-	/* need to consider alignment requirements ? */
-	if (likely(addr_merge)) {
-		/* Max page size allowed by address */
-		unsigned int align_pgsize_idx = __ffs(addr_merge);
-
-		pgsize_idx = min(pgsize_idx, align_pgsize_idx);
-	}
-
-	/* build a mask of acceptable page sizes */
-	pgsize = (1UL << (pgsize_idx + 1)) - 1;
-
-	/* throw away page sizes not supported by the hardware */
-	pgsize &= pgsize_bitmap;
-
-	/* make sure we're still sane */
-	WARN_ON(!pgsize);
-
-	/* pick the biggest page */
-	pgsize_idx = __fls(pgsize);
-	pgsize = 1UL << pgsize_idx;
-
-	return pgsize;
-}
-
 /**
  * imgu_mmu_map - map a buffer to a physical address
  *
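To see why deleting imgu_mmu_pgsize() loses nothing: with a pgsize_bitmap that has a single bit set, the final `pgsize &= pgsize_bitmap` step can leave at most that one bit, so the helper always returns IPU3_PAGE_SIZE for page-aligned inputs. A user-space port of the removed logic for demonstration (portable built-ins replace the kernel's __fls()/__ffs(); the demo addresses are made up):

#include <stdio.h>
#include <stddef.h>

#define IPU3_PAGE_SIZE	4096UL

static unsigned int fls_long(unsigned long x) { return 63 - __builtin_clzl(x); }
static unsigned int ffs_long(unsigned long x) { return __builtin_ctzl(x); }

static size_t pgsize_demo(unsigned long pgsize_bitmap,
			  unsigned long addr_merge, size_t size)
{
	unsigned int pgsize_idx = fls_long(size);

	if (addr_merge && ffs_long(addr_merge) < pgsize_idx)
		pgsize_idx = ffs_long(addr_merge);

	/* candidate sizes up to pgsize_idx, intersected with the hw bitmap */
	return 1UL << fls_long(((1UL << (pgsize_idx + 1)) - 1) & pgsize_bitmap);
}

int main(void)
{
	/* both calls print 4096: a one-bit bitmap admits only one answer */
	printf("%zu\n", pgsize_demo(IPU3_PAGE_SIZE, 0x14000, 0x20000));
	printf("%zu\n", pgsize_demo(IPU3_PAGE_SIZE, 0, IPU3_PAGE_SIZE));
	return 0;
}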
@@ -290,20 +250,16 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
 		 phys_addr_t paddr, size_t size)
 {
 	struct imgu_mmu *mmu = to_imgu_mmu(info);
-	unsigned int min_pagesz;
 	int ret = 0;
 
-	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
 	/*
 	 * both the virtual address and the physical one, as well as
 	 * the size of the mapping, must be aligned (at least) to the
 	 * size of the smallest page supported by the hardware
 	 */
-	if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
-		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
-			iova, &paddr, size, min_pagesz);
+	if (!IS_ALIGNED(iova | paddr | size, IPU3_PAGE_SIZE)) {
+		dev_err(mmu->dev, "unaligned: iova 0x%lx pa %pa size 0x%zx\n",
+			iova, &paddr, size);
 		return -EINVAL;
 	}
@@ -311,19 +267,15 @@ int imgu_mmu_map(struct imgu_mmu_info *info, unsigned long iova,
 		iova, &paddr, size);
 
 	while (size) {
-		size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
-						iova | paddr, size);
-
-		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
-			iova, &paddr, pgsize);
+		dev_dbg(mmu->dev, "mapping: iova 0x%lx pa %pa\n", iova, &paddr);
 
 		ret = __imgu_mmu_map(mmu, iova, paddr);
 		if (ret)
 			break;
 
-		iova += pgsize;
-		paddr += pgsize;
-		size -= pgsize;
+		iova += IPU3_PAGE_SIZE;
+		paddr += IPU3_PAGE_SIZE;
+		size -= IPU3_PAGE_SIZE;
 	}
 
 	call_if_imgu_is_powered(mmu, imgu_mmu_tlb_invalidate);
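With the helper gone, the map loop advances by a constant page stride. A stand-alone model of the arithmetic (the addresses are made-up examples): mapping 16 KiB performs exactly four 4 KiB iterations.

#include <stdio.h>

#define IPU3_PAGE_SIZE	4096UL

int main(void)
{
	unsigned long iova = 0x100000, paddr = 0x8000000;
	unsigned long size = 4 * IPU3_PAGE_SIZE;	/* 16 KiB */

	while (size) {
		printf("map iova 0x%lx -> pa 0x%lx\n", iova, paddr);
		iova += IPU3_PAGE_SIZE;
		paddr += IPU3_PAGE_SIZE;
		size -= IPU3_PAGE_SIZE;
	}
	return 0;
}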
@@ -348,21 +300,19 @@ size_t imgu_mmu_map_sg(struct imgu_mmu_info *info, unsigned long iova,
 	struct imgu_mmu *mmu = to_imgu_mmu(info);
 	struct scatterlist *s;
 	size_t s_length, mapped = 0;
-	unsigned int i, min_pagesz;
+	unsigned int i;
 	int ret;
 
-	min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
-
 	for_each_sg(sg, s, nents, i) {
 		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
 
 		s_length = s->length;
 
-		if (!IS_ALIGNED(s->offset, min_pagesz))
+		if (!IS_ALIGNED(s->offset, IPU3_PAGE_SIZE))
 			goto out_err;
 
-		/* must be min_pagesz aligned to be mapped singlely */
-		if (i == nents - 1 && !IS_ALIGNED(s->length, min_pagesz))
+		/* must be IPU3_PAGE_SIZE aligned to be mapped singlely */
+		if (i == nents - 1 && !IS_ALIGNED(s->length, IPU3_PAGE_SIZE))
 			s_length = PAGE_ALIGN(s->length);
 
 		ret = imgu_mmu_map(info, iova + mapped, phys, s_length);
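The scatterlist path now checks alignment against the fixed page size: every segment's offset must be 4 KiB aligned, and only the final segment may have its length rounded up. A stand-alone model of that rule (the struct and values are illustrative, not the driver's types):

#include <stdio.h>

#define IPU3_PAGE_SIZE	4096UL
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

struct seg { unsigned long offset, length; };

int main(void)
{
	struct seg segs[] = { { 0, 8192 }, { 0, 5000 } };
	unsigned int i, nents = 2;

	for (i = 0; i < nents; i++) {
		if (!IS_ALIGNED(segs[i].offset, IPU3_PAGE_SIZE))
			return 1;	/* unaligned offset: reject */
		/* only the last segment's length may be rounded up */
		if (i == nents - 1 && !IS_ALIGNED(segs[i].length, IPU3_PAGE_SIZE))
			segs[i].length = (segs[i].length + IPU3_PAGE_SIZE - 1) &
					 ~(IPU3_PAGE_SIZE - 1);
	}
	printf("last segment padded to %lu bytes\n", segs[nents - 1].length);
	return 0;
}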
@@ -429,19 +379,15 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
 {
 	struct imgu_mmu *mmu = to_imgu_mmu(info);
 	size_t unmapped_page, unmapped = 0;
-	unsigned int min_pagesz;
-
-	/* find out the minimum page size supported */
-	min_pagesz = 1 << __ffs(mmu->geometry.pgsize_bitmap);
 
 	/*
 	 * The virtual address, as well as the size of the mapping, must be
 	 * aligned (at least) to the size of the smallest page supported
 	 * by the hardware
 	 */
-	if (!IS_ALIGNED(iova | size, min_pagesz)) {
-		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
-			iova, size, min_pagesz);
+	if (!IS_ALIGNED(iova | size, IPU3_PAGE_SIZE)) {
+		dev_err(mmu->dev, "unaligned: iova 0x%lx size 0x%zx\n",
+			iova, size);
 		return -EINVAL;
 	}
@@ -452,10 +398,7 @@ size_t imgu_mmu_unmap(struct imgu_mmu_info *info, unsigned long iova,
 	 * or we hit an area that isn't mapped.
 	 */
 	while (unmapped < size) {
-		size_t pgsize = imgu_mmu_pgsize(mmu->geometry.pgsize_bitmap,
-						iova, size - unmapped);
-
-		unmapped_page = __imgu_mmu_unmap(mmu, iova, pgsize);
+		unmapped_page = __imgu_mmu_unmap(mmu, iova, IPU3_PAGE_SIZE);
 		if (!unmapped_page)
 			break;
@@ -535,7 +478,6 @@ struct imgu_mmu_info *imgu_mmu_init(struct device *parent, void __iomem *base)
 	mmu->geometry.aperture_start = 0;
 	mmu->geometry.aperture_end = DMA_BIT_MASK(IPU3_MMU_ADDRESS_BITS);
-	mmu->geometry.pgsize_bitmap = IPU3_PAGE_SIZE;
 
 	return &mmu->geometry;
......
@@ -5,17 +5,18 @@
 #ifndef __IPU3_MMU_H
 #define __IPU3_MMU_H
 
+#define IPU3_PAGE_SHIFT	12
+#define IPU3_PAGE_SIZE	(1UL << IPU3_PAGE_SHIFT)
+
 /**
  * struct imgu_mmu_info - Describes mmu geometry
  *
  * @aperture_start:	First address that can be mapped
  * @aperture_end:	Last address that can be mapped
- * @pgsize_bitmap:	Bitmap of page sizes in use
  */
 struct imgu_mmu_info {
 	dma_addr_t aperture_start;
 	dma_addr_t aperture_end;
-	unsigned long pgsize_bitmap;
 };
 
 struct device;
......
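With IPU3_PAGE_SHIFT/IPU3_PAGE_SIZE now exported from ipu3-mmu.h, the MMU and DMA-mapping code share a single definition. A closing sketch of the kind of derived arithmetic this enables (ipu3_pages_for() is a hypothetical helper, not part of the patch):

#include <stdio.h>

#define IPU3_PAGE_SHIFT	12
#define IPU3_PAGE_SIZE	(1UL << IPU3_PAGE_SHIFT)

/* hypothetical helper: round a byte length up to whole IPU3 pages */
static unsigned long ipu3_pages_for(unsigned long len)
{
	return (len + IPU3_PAGE_SIZE - 1) >> IPU3_PAGE_SHIFT;
}

int main(void)
{
	printf("%lu %lu %lu\n", ipu3_pages_for(1),	/* 1 */
	       ipu3_pages_for(4096),			/* 1 */
	       ipu3_pages_for(4097));			/* 2 */
	return 0;
}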