Commit 856661d5 authored by Rebecca Schultz Zavin, committed by Greg Kroah-Hartman

gpu: ion: Add support for cached mappings that don't fault

We have found that faulting in the mappings for cached
allocations has a significant performance impact, and is
a benefit only when a small part of the buffer is touched
by the CPU (an uncommon case for software rendering).
This patch introduces an ION_FLAG_CACHED_NEEDS_SYNC flag
which determines whether a mapping should be created by
faulting or at mmap time.  If this flag is set, userspace
must manage the caches explicitly using the SYNC ioctl.
Signed-off-by: Rebecca Schultz Zavin <rebecca@android.com>
[jstultz: modified patch to apply to staging directory]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent d3c0bced
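For context, the intended userspace flow looks roughly like the sketch below. This is a minimal, illustrative sequence assuming the staging ion.h of this era (the /dev/ion device node, a heap_mask field in struct ion_allocation_data, and the ION_IOC_ALLOC/ION_IOC_SHARE/ION_IOC_SYNC ioctls); the function name and sizes are invented for illustration, not part of this patch:

/*
 * Illustrative sketch only -- not part of this patch.  Assumes the
 * staging ion.h of this era: a /dev/ion device node, ION_IOC_ALLOC,
 * ION_IOC_SHARE and ION_IOC_SYNC, struct ion_allocation_data with
 * len/align/heap_mask/flags/handle, and struct ion_fd_data.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "ion.h"	/* copy of drivers/staging/android/ion.h */

static int cached_nofault_example(void)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len       = 16 * 4096,		/* sizes are arbitrary */
		.align     = 4096,
		.heap_mask = 1 << ION_HEAP_TYPE_SYSTEM,
		/* cached, but mapped in full at mmap time */
		.flags     = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
	};
	struct ion_fd_data share;
	void *ptr;

	if (ion_fd < 0 || ioctl(ion_fd, ION_IOC_ALLOC, &alloc) < 0)
		return -1;

	share.handle = alloc.handle;
	if (ioctl(ion_fd, ION_IOC_SHARE, &share) < 0)
		return -1;

	/* With ION_FLAG_CACHED_NEEDS_SYNC the whole range is mapped here,
	 * instead of one page at a time through ion_vm_fault(). */
	ptr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE, MAP_SHARED,
		   share.fd, 0);
	if (ptr == MAP_FAILED)
		return -1;

	memset(ptr, 0, alloc.len);	/* CPU writes land in the cache */

	/* Cache maintenance is now userspace's job: sync before any
	 * device consumes the buffer. */
	return ioctl(ion_fd, ION_IOC_SYNC, &share);
}

The trade is visible at the mmap() call: all the mapping work happens up front, and the explicit ION_IOC_SYNC replaces the per-fault cache bookkeeping the kernel would otherwise do.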
drivers/staging/android/ion.c
@@ -164,7 +164,8 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		return ERR_PTR(PTR_ERR(table));
 	}
 	buffer->sg_table = table;
-	if (buffer->flags & ION_FLAG_CACHED) {
+	if (buffer->flags & ION_FLAG_CACHED &&
+	    !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
 		for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents,
 			    i) {
 			if (sg_dma_len(sg) == PAGE_SIZE)
@@ -763,7 +764,8 @@ static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 	pr_debug("%s: syncing for device %s\n", __func__,
 		 dev ? dev_name(dev) : "null");
 
-	if (!(buffer->flags & ION_FLAG_CACHED))
+	if (!(buffer->flags & ION_FLAG_CACHED) ||
+	    (buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC))
 		return;
 
 	mutex_lock(&buffer->lock);
@@ -853,17 +855,21 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 
-	if (buffer->flags & ION_FLAG_CACHED) {
+	if (buffer->flags & ION_FLAG_CACHED &&
+	    !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC)) {
 		vma->vm_private_data = buffer;
 		vma->vm_ops = &ion_vma_ops;
 		ion_vm_open(vma);
-	} else {
+		return 0;
+	}
+
+	if (!(buffer->flags & ION_FLAG_CACHED))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
 	mutex_lock(&buffer->lock);
 	/* now map it to userspace */
 	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
 	mutex_unlock(&buffer->lock);
-	}
+
 	if (ret)
 		pr_err("%s: failure mapping buffer to userspace\n",
@@ -1021,7 +1027,9 @@ static int ion_sync_for_device(struct ion_client *client, int fd)
 		return -EINVAL;
 	}
 	buffer = dmabuf->priv;
-	ion_buffer_sync_for_device(buffer, NULL, DMA_BIDIRECTIONAL);
+
+	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
+			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
 	dma_buf_put(dmabuf);
 	return 0;
 }
drivers/staging/android/ion.h
@@ -50,6 +50,9 @@ enum ion_heap_type {
 					   cached, ion will do cache
 					   maintenance when the buffer is
 					   mapped for dma */
+#define ION_FLAG_CACHED_NEEDS_SYNC 2	/* mappings of this buffer will be
+					   created at mmap time; if this is set,
+					   caches must be managed manually */
 
 #ifdef __KERNEL__
 struct ion_device;
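Read together, the ion.c hunks above all branch on the same condition. As a reading aid only, that shared predicate could be written as the following sketch (the helper name is hypothetical and is not introduced by this patch):

/* Hypothetical reading aid, not part of the patch: fault-based per-page
 * mappings (with ion doing cache maintenance at dma map time) are kept
 * only for cached buffers that did not opt into manual cache management. */
static bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
	       !(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}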