Commit 5dc3bafb authored by Laura Abbott, committed by Greg Kroah-Hartman

staging: android: ion: Remove crufty cache support

Now that we call dma_map in the dma_buf API callbacks, there is no need
to use the existing cache APIs. Remove the sync ioctl and the existing
bad dma_sync calls. Explicit caching can be handled with the dma_buf
sync API.
Signed-off-by: Laura Abbott <labbott@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0499a614
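The replacement path the message refers to is the dma-buf sync ioctl from the <linux/dma-buf.h> uapi header. A minimal userspace sketch of what a former ION_IOC_SYNC caller does instead (assuming dmabuf_fd is an ion-exported dma-buf fd and vaddr an existing mmap() of it; touch_buffer is an illustrative name):

/*
 * Sketch: bracket CPU access to a cached dma-buf with DMA_BUF_IOCTL_SYNC,
 * which replaces the removed ION_IOC_SYNC. dmabuf_fd and vaddr are assumed
 * to come from ion allocation/export and mmap() respectively.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

static int touch_buffer(int dmabuf_fd, void *vaddr, size_t len)
{
	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START |
					      DMA_BUF_SYNC_RW };

	/* Claim the buffer for the CPU before touching it. */
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) < 0)
		return -1;

	memset(vaddr, 0, len);	/* CPU writes through the cached mapping */

	/* Hand the buffer back for device use. */
	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
}

DMA_BUF_SYNC_START and DMA_BUF_SYNC_END are routed by the kernel to the exporter's begin_cpu_access and end_cpu_access callbacks, so the cache maintenance happens against a valid device rather than the NULL device the removed calls passed.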
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -186,7 +186,6 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	case ION_IOC_SHARE:
 	case ION_IOC_MAP:
 	case ION_IOC_IMPORT:
-	case ION_IOC_SYNC:
 		return filp->f_op->unlocked_ioctl(filp, cmd,
 						(unsigned long)compat_ptr(arg));
 	default:
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -51,7 +51,6 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
 static unsigned int ion_ioctl_dir(unsigned int cmd)
 {
 	switch (cmd) {
-	case ION_IOC_SYNC:
 	case ION_IOC_FREE:
 	case ION_IOC_CUSTOM:
 		return _IOC_WRITE;
@@ -146,11 +145,6 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		data.handle.handle = handle->id;
 		break;
 	}
-	case ION_IOC_SYNC:
-	{
-		ret = ion_sync_for_device(client, data.fd.fd);
-		break;
-	}
 	case ION_IOC_CUSTOM:
 	{
 		if (!dev->custom_ioctl)
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -863,22 +863,6 @@ static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 }
 
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
-			       size_t size, enum dma_data_direction dir)
-{
-	struct scatterlist sg;
-
-	sg_init_table(&sg, 1);
-	sg_set_page(&sg, page, size, 0);
-	/*
-	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
-	 * for the targeted device, but this works on the currently targeted
-	 * hardware.
-	 */
-	sg_dma_address(&sg) = page_to_phys(page);
-	dma_sync_sg_for_device(dev, &sg, 1, dir);
-}
-
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
@@ -1097,30 +1081,6 @@ struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
 }
 EXPORT_SYMBOL(ion_import_dma_buf_fd);
 
-int ion_sync_for_device(struct ion_client *client, int fd)
-{
-	struct dma_buf *dmabuf;
-	struct ion_buffer *buffer;
-
-	dmabuf = dma_buf_get(fd);
-	if (IS_ERR(dmabuf))
-		return PTR_ERR(dmabuf);
-
-	/* if this memory came from ion */
-	if (dmabuf->ops != &dma_buf_ops) {
-		pr_err("%s: can not sync dmabuf from another exporter\n",
-		       __func__);
-		dma_buf_put(dmabuf);
-		return -EINVAL;
-	}
-	buffer = dmabuf->priv;
-
-	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
-			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
-	dma_buf_put(dmabuf);
-	return 0;
-}
-
 int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query)
 {
 	struct ion_device *dev = client->dev;
--- a/drivers/staging/android/ion/ion_carveout_heap.c
+++ b/drivers/staging/android/ion/ion_carveout_heap.c
@@ -100,10 +100,6 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	ion_heap_buffer_zero(buffer);
 
-	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
-				       DMA_BIDIRECTIONAL);
-
 	ion_carveout_free(heap, paddr, buffer->size);
 	sg_free_table(table);
 	kfree(table);
@@ -128,8 +124,6 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data)
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
 		return ERR_PTR(ret);
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -101,10 +101,6 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	ion_heap_buffer_zero(buffer);
 
-	if (ion_buffer_cached(buffer))
-		dma_sync_sg_for_device(NULL, table->sgl, table->nents,
-				       DMA_BIDIRECTIONAL);
-
 	for_each_sg(table->sgl, sg, table->nents, i) {
 		gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)),
 			      sg->length);
@@ -132,8 +128,6 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
 	page = pfn_to_page(PFN_DOWN(heap_data->base));
 	size = heap_data->size;
 
-	ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL);
-
 	ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL));
 	if (ret)
 		return ERR_PTR(ret);
--- a/drivers/staging/android/ion/ion_page_pool.c
+++ b/drivers/staging/android/ion/ion_page_pool.c
@@ -30,9 +30,6 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
 	if (!page)
 		return NULL;
-	if (!pool->cached)
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order,
-					  DMA_BIDIRECTIONAL);
 	return page;
 }
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -440,21 +440,8 @@ void ion_page_pool_free(struct ion_page_pool *pool, struct page *page);
 int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask,
 			 int nr_to_scan);
 
-/**
- * ion_pages_sync_for_device - cache flush pages for use with the specified
- *                             device
- * @dev:	the device the pages will be used with
- * @page:	the first page to be flushed
- * @size:	size in bytes of region to be flushed
- * @dir:	direction of dma transfer
- */
-void ion_pages_sync_for_device(struct device *dev, struct page *page,
-			       size_t size, enum dma_data_direction dir);
-
 long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
 
-int ion_sync_for_device(struct ion_client *client, int fd);
-
 struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
 					       int id);
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -75,9 +75,6 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap,
 	page = ion_page_pool_alloc(pool);
 
-	if (cached)
-		ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order,
-					  DMA_BIDIRECTIONAL);
 	return page;
 }
@@ -401,8 +398,6 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 	buffer->sg_table = table;
 
-	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
-
 	return 0;
 
 free_table:
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -206,16 +206,6 @@ struct ion_heap_query {
  */
 #define ION_IOC_IMPORT		_IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
 
-/**
- * DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
- *
- * Deprecated in favor of using the dma_buf api's correctly (syncing
- * will happen automatically when the buffer is mapped to a device).
- * If necessary should be used after touching a cached buffer from the cpu,
- * this will make the buffer in memory coherent.
- */
-#define ION_IOC_SYNC		_IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
-
 /**
  * DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
  *
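For reference, the exporter-side half of that contract looks roughly like the sketch below. This is illustrative rather than the ion code touched by this commit; example_buffer and its dev field are assumed names.

#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>

/* Illustrative exporter state; ion keeps the equivalent in ion_buffer. */
struct example_buffer {
	struct device *dev;		/* device the scatterlist was mapped for */
	struct sg_table *sg_table;
};

/* DMA_BUF_SYNC_START from userspace ends up here. */
static int example_begin_cpu_access(struct dma_buf *dmabuf,
				    enum dma_data_direction dir)
{
	struct example_buffer *buf = dmabuf->priv;

	/* Make the buffer CPU-visible: drop stale cache lines. */
	dma_sync_sg_for_cpu(buf->dev, buf->sg_table->sgl,
			    buf->sg_table->nents, dir);
	return 0;
}

/* DMA_BUF_SYNC_END from userspace ends up here. */
static int example_end_cpu_access(struct dma_buf *dmabuf,
				  enum dma_data_direction dir)
{
	struct example_buffer *buf = dmabuf->priv;

	/* Hand the buffer back to the device: write back dirty lines. */
	dma_sync_sg_for_device(buf->dev, buf->sg_table->sgl,
			       buf->sg_table->nents, dir);
	return 0;
}

Unlike the removed ion_sync_for_device() and ion_pages_sync_for_device(), the sync here runs against the device the scatterlist was actually mapped for and honors the caller's direction, instead of forcing DMA_BIDIRECTIONAL on a NULL device.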