Commit 27b67278 authored by Lucas Stach

drm/etnaviv: rework MMU handling

This reworks the MMU handling to make it possible to have multiple MMU contexts.
A context is basically one instance of GPU page tables. Currently we have one
set of page tables per GPU, which isn't all that clever, as it has the
following two consequences:

1. All GPU clients (i.e. processes) share the same pagetables, so there is no
isolation between clients, only between the GPU-assigned memory spaces and the
rest of the system. Better than nothing, but not great.

2. Clients operating on the same set of buffers with different etnaviv GPU
cores, e.g. a workload using both the 2D and 3D GPU, need to map the used
buffers into the pagetable sets of each used GPU.

This patch reworks all the MMU handling to introduce the abstraction of the
MMU context. A context can be shared across different GPU cores, as long as
they have compatible MMU implementations, which is the case for all systems
with Vivante GPUs seen in the wild.
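
The sharing is reference counted: each GPU core using a context holds a
reference and drops it when it no longer needs the context. A minimal sketch of
how the helpers introduced by this patch are meant to be used (the surrounding
setup is illustrative, not taken verbatim from the driver):

	/* create a context on the global MMU state and share it with a second core */
	struct etnaviv_iommu_context *ctx = etnaviv_iommu_context_init(priv->mmu_global);

	etnaviv_iommu_context_get(ctx);	/* second user takes a kref on ctx->refcount */
	/* both cores can now map and unmap buffers through the same context */
	etnaviv_iommu_context_put(ctx);	/* dropping the last reference frees the context */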

As MMUv1 is not able to change pagetables on the fly without a "stop the
world" operation (stop the GPU, change the pagetables via the CPU, restart the
GPU), the implementation introduces a single shared context on MMUv1, which is
returned whenever a new context is requested.
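
Concretely, the MMUv1 backend keeps one shared context in the global MMU state
and hands it out on every allocation request; only the very first caller builds
a pagetable. A sketch of that policy, mirroring the etnaviv_iommuv1_context_alloc()
hunk further down:

	mutex_lock(&global->lock);
	if (global->v1.shared_context) {
		/* someone already created the MMUv1 context: just take a reference */
		context = global->v1.shared_context;
		etnaviv_iommu_context_get(context);
		mutex_unlock(&global->lock);
		return context;
	}
	/* first caller: allocate the pagetable and publish it as the shared context */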

This patch assigns an MMU context to each GPU, so on MMUv2 systems there is
still one set of pagetables per GPU. Due to the shared context, MMUv1 systems
see a change in behavior: a single pagetable set is now used across all GPU
cores.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Philipp Zabel <p.zabel@pengutronix.de>
Reviewed-by: Guido Günther <agx@sigxcpu.org>
parent 4900dda9
@@ -207,7 +207,7 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
 	return buffer->user_size / 8;
 }
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id)
 {
 	struct etnaviv_cmdbuf *buffer = &gpu->buffer;
@@ -216,7 +216,7 @@ u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
 	buffer->user_size = 0;
 	CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
-		       VIVS_MMUv2_PTA_CONFIG_INDEX(0));
+		       VIVS_MMUv2_PTA_CONFIG_INDEX(id));
 	CMD_END(buffer);
@@ -315,7 +315,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	u32 return_target, return_dwords;
 	u32 link_target, link_dwords;
 	bool switch_context = gpu->exec_state != exec_state;
-	unsigned int new_flush_seq = READ_ONCE(gpu->mmu->flush_seq);
+	unsigned int new_flush_seq = READ_ONCE(gpu->mmu_context->flush_seq);
 	bool need_flush = gpu->flush_seq != new_flush_seq;
 	lockdep_assert_held(&gpu->lock);
@@ -339,7 +339,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	/* flush command */
 	if (need_flush) {
-		if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
+		if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1)
 			extra_dwords += 1;
 		else
 			extra_dwords += 3;
@@ -353,7 +353,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
 	if (need_flush) {
 		/* Add the MMU flush */
-		if (gpu->mmu->version == ETNAVIV_IOMMU_V1) {
+		if (gpu->mmu_context->global->version == ETNAVIV_IOMMU_V1) {
 			CMD_LOAD_STATE(buffer, VIVS_GL_FLUSH_MMU,
 				       VIVS_GL_FLUSH_MMU_FLUSH_FEMMU |
 				       VIVS_GL_FLUSH_MMU_FLUSH_UNK1 |
......
@@ -60,18 +60,18 @@ etnaviv_cmdbuf_suballoc_new(struct device *dev)
 }
 int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
-				struct etnaviv_iommu *mmu,
+				struct etnaviv_iommu_context *context,
 				struct etnaviv_vram_mapping *mapping,
 				u32 memory_base)
 {
-	return etnaviv_iommu_get_suballoc_va(mmu, mapping, memory_base,
+	return etnaviv_iommu_get_suballoc_va(context, mapping, memory_base,
					     suballoc->paddr, SUBALLOC_SIZE);
 }
-void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping)
 {
-	etnaviv_iommu_put_suballoc_va(mmu, mapping);
+	etnaviv_iommu_put_suballoc_va(context, mapping);
 }
 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc)
......
@@ -9,7 +9,7 @@
 #include <linux/types.h>
 struct device;
-struct etnaviv_iommu;
+struct etnaviv_iommu_context;
 struct etnaviv_vram_mapping;
 struct etnaviv_cmdbuf_suballoc;
 struct etnaviv_perfmon_request;
@@ -28,10 +28,10 @@ struct etnaviv_cmdbuf_suballoc *
 etnaviv_cmdbuf_suballoc_new(struct device *dev);
 void etnaviv_cmdbuf_suballoc_destroy(struct etnaviv_cmdbuf_suballoc *suballoc);
 int etnaviv_cmdbuf_suballoc_map(struct etnaviv_cmdbuf_suballoc *suballoc,
-				struct etnaviv_iommu *mmu,
+				struct etnaviv_iommu_context *context,
				struct etnaviv_vram_mapping *mapping,
				u32 memory_base);
-void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu *mmu,
+void etnaviv_cmdbuf_suballoc_unmap(struct etnaviv_iommu_context *context,
				   struct etnaviv_vram_mapping *mapping);
......
@@ -119,9 +119,9 @@ static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
 	seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));
-	mutex_lock(&gpu->mmu->lock);
-	drm_mm_print(&gpu->mmu->mm, &p);
-	mutex_unlock(&gpu->mmu->lock);
+	mutex_lock(&gpu->mmu_context->lock);
+	drm_mm_print(&gpu->mmu_context->mm, &p);
+	mutex_unlock(&gpu->mmu_context->lock);
 	return 0;
 }
......
@@ -22,6 +22,7 @@ struct etnaviv_gpu;
 struct etnaviv_mmu;
 struct etnaviv_gem_object;
 struct etnaviv_gem_submit;
+struct etnaviv_iommu_global;
 struct etnaviv_file_private {
	/*
@@ -37,6 +38,7 @@ struct etnaviv_drm_private {
 	struct etnaviv_gpu *gpu[ETNA_MAX_PIPES];
 	struct etnaviv_cmdbuf_suballoc *cmdbuf_suballoc;
+	struct etnaviv_iommu_global *mmu_global;
 	/* list of GEM objects: */
 	struct mutex gem_lock;
@@ -69,7 +71,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
			    uintptr_t ptr, u32 size, u32 flags, u32 *handle);
 u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
 u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
-u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
+u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu, unsigned short id);
 void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
 void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
 void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
......
@@ -93,7 +93,7 @@ static void etnaviv_core_dump_registers(struct core_dump_iterator *iter,
 }
 static void etnaviv_core_dump_mmu(struct core_dump_iterator *iter,
-				  struct etnaviv_iommu *mmu, size_t mmu_size)
+				  struct etnaviv_iommu_context *mmu, size_t mmu_size)
 {
 	etnaviv_iommu_dump(mmu, iter->data);
@@ -125,9 +125,9 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
		return;
 	etnaviv_dump_core = false;
-	mutex_lock(&gpu->mmu->lock);
-	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
+	mutex_lock(&gpu->mmu_context->lock);
+	mmu_size = etnaviv_iommu_dump_size(gpu->mmu_context);
 	/* We always dump registers, mmu, ring, hanging cmdbuf and end marker */
 	n_obj = 5;
@@ -157,7 +157,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
			       PAGE_KERNEL);
 	if (!iter.start) {
-		mutex_unlock(&gpu->mmu->lock);
+		mutex_unlock(&gpu->mmu_context->lock);
		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
		return;
 	}
@@ -169,7 +169,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
 	memset(iter.hdr, 0, iter.data - iter.start);
 	etnaviv_core_dump_registers(&iter, gpu);
-	etnaviv_core_dump_mmu(&iter, gpu->mmu, mmu_size);
+	etnaviv_core_dump_mmu(&iter, gpu->mmu_context, mmu_size);
 	etnaviv_core_dump_mem(&iter, ETDUMP_BUF_RING, gpu->buffer.vaddr,
			      gpu->buffer.size,
			      etnaviv_cmdbuf_get_va(&gpu->buffer,
@@ -221,7 +221,7 @@ void etnaviv_core_dump(struct etnaviv_gem_submit *submit)
				     obj->base.size);
 	}
-	mutex_unlock(&gpu->mmu->lock);
+	mutex_unlock(&gpu->mmu_context->lock);
 	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);
......
@@ -223,12 +223,12 @@ int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
 static struct etnaviv_vram_mapping *
 etnaviv_gem_get_vram_mapping(struct etnaviv_gem_object *obj,
-			     struct etnaviv_iommu *mmu)
+			     struct etnaviv_iommu_context *context)
 {
 	struct etnaviv_vram_mapping *mapping;
 	list_for_each_entry(mapping, &obj->vram_list, obj_node) {
-		if (mapping->mmu == mmu)
+		if (mapping->context == context)
			return mapping;
 	}
@@ -256,7 +256,7 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
 	int ret = 0;
 	mutex_lock(&etnaviv_obj->lock);
-	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
+	mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu_context);
 	if (mapping) {
		/*
		 * Holding the object lock prevents the use count changing
@@ -265,12 +265,12 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
		 * the MMU owns this mapping to close this race.
		 */
		if (mapping->use == 0) {
-			mutex_lock(&gpu->mmu->lock);
-			if (mapping->mmu == gpu->mmu)
+			mutex_lock(&gpu->mmu_context->lock);
+			if (mapping->context == gpu->mmu_context)
				mapping->use += 1;
			else
				mapping = NULL;
-			mutex_unlock(&gpu->mmu->lock);
+			mutex_unlock(&gpu->mmu_context->lock);
			if (mapping)
				goto out;
		} else {
@@ -303,11 +303,11 @@ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
		list_del(&mapping->obj_node);
 	}
-	mapping->mmu = gpu->mmu;
+	mapping->context = gpu->mmu_context;
 	mapping->use = 1;
-	ret = etnaviv_iommu_map_gem(gpu->mmu, etnaviv_obj, gpu->memory_base,
-				    mapping);
+	ret = etnaviv_iommu_map_gem(gpu->mmu_context, etnaviv_obj,
+				    gpu->memory_base, mapping);
 	if (ret < 0)
		kfree(mapping);
 	else
@@ -525,12 +525,12 @@ void etnaviv_gem_free_object(struct drm_gem_object *obj)
 	list_for_each_entry_safe(mapping, tmp, &etnaviv_obj->vram_list,
				 obj_node) {
-		struct etnaviv_iommu *mmu = mapping->mmu;
+		struct etnaviv_iommu_context *context = mapping->context;
		WARN_ON(mapping->use);
-		if (mmu)
-			etnaviv_iommu_unmap_gem(mmu, mapping);
+		if (context)
+			etnaviv_iommu_unmap_gem(context, mapping);
		list_del(&mapping->obj_node);
		kfree(mapping);
......
@@ -25,7 +25,7 @@ struct etnaviv_vram_mapping {
 	struct list_head scan_node;
 	struct list_head mmu_node;
 	struct etnaviv_gem_object *object;
-	struct etnaviv_iommu *mmu;
+	struct etnaviv_iommu_context *context;
 	struct drm_mm_node vram_node;
 	unsigned int use;
 	u32 iova;
......
@@ -681,7 +681,7 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu)
 	etnaviv_gpu_setup_pulse_eater(gpu);
 	/* setup the MMU */
-	etnaviv_iommu_restore(gpu);
+	etnaviv_iommu_restore(gpu, gpu->mmu_context);
 	/* Start command processor */
 	prefetch = etnaviv_buffer_init(gpu);
@@ -754,14 +754,19 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
		goto fail;
 	}
-	gpu->mmu = etnaviv_iommu_new(gpu);
-	if (IS_ERR(gpu->mmu)) {
-		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
-		ret = PTR_ERR(gpu->mmu);
+	ret = etnaviv_iommu_global_init(gpu);
+	if (ret)
		goto fail;
+	gpu->mmu_context = etnaviv_iommu_context_init(priv->mmu_global);
+	if (IS_ERR(gpu->mmu_context)) {
+		dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n");
+		ret = PTR_ERR(gpu->mmu_context);
+		goto iommu_global_fini;
 	}
-	ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc, gpu->mmu,
+	ret = etnaviv_cmdbuf_suballoc_map(priv->cmdbuf_suballoc,
+					  gpu->mmu_context,
					  &gpu->cmdbuf_mapping,
					  gpu->memory_base);
 	if (ret) {
@@ -777,7 +782,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
		goto unmap_suballoc;
 	}
-	if (gpu->mmu->version == ETNAVIV_IOMMU_V1 &&
+	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION) &&
	    etnaviv_cmdbuf_get_va(&gpu->buffer, &gpu->cmdbuf_mapping) > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
@@ -808,9 +813,11 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 free_buffer:
 	etnaviv_cmdbuf_free(&gpu->buffer);
 unmap_suballoc:
-	etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
+	etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context, &gpu->cmdbuf_mapping);
 destroy_iommu:
-	etnaviv_iommu_destroy(gpu->mmu);
+	etnaviv_iommu_context_put(gpu->mmu_context);
+iommu_global_fini:
+	etnaviv_iommu_global_fini(gpu);
 fail:
 	pm_runtime_mark_last_busy(gpu->dev);
 	pm_runtime_put_autosuspend(gpu->dev);
@@ -1683,8 +1690,10 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	if (gpu->initialized) {
		etnaviv_cmdbuf_free(&gpu->buffer);
-		etnaviv_cmdbuf_suballoc_unmap(gpu->mmu, &gpu->cmdbuf_mapping);
-		etnaviv_iommu_destroy(gpu->mmu);
+		etnaviv_cmdbuf_suballoc_unmap(gpu->mmu_context,
+					      &gpu->cmdbuf_mapping);
+		etnaviv_iommu_context_put(gpu->mmu_context);
+		etnaviv_iommu_global_fini(gpu);
		gpu->initialized = false;
 	}
......
@@ -8,6 +8,7 @@
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_gem.h"
+#include "etnaviv_mmu.h"
 #include "etnaviv_drv.h"
 struct etnaviv_gem_submit;
@@ -136,7 +137,7 @@ struct etnaviv_gpu {
 	void __iomem *mmio;
 	int irq;
-	struct etnaviv_iommu *mmu;
+	struct etnaviv_iommu_context *mmu_context;
 	unsigned int flush_seq;
 	/* Power Control: */
......
@@ -11,7 +11,6 @@
 #include "etnaviv_gpu.h"
 #include "etnaviv_mmu.h"
-#include "etnaviv_iommu.h"
 #include "state_hi.xml.h"
 #define PT_SIZE SZ_2M
@@ -19,113 +18,78 @@
 #define GPU_MEM_START 0x80000000
-struct etnaviv_iommuv1_domain {
-	struct etnaviv_iommu_domain base;
+struct etnaviv_iommuv1_context {
+	struct etnaviv_iommu_context base;
 	u32 *pgtable_cpu;
 	dma_addr_t pgtable_dma;
 };
-static struct etnaviv_iommuv1_domain *
-to_etnaviv_domain(struct etnaviv_iommu_domain *domain)
+static struct etnaviv_iommuv1_context *
+to_v1_context(struct etnaviv_iommu_context *context)
 {
-	return container_of(domain, struct etnaviv_iommuv1_domain, base);
+	return container_of(context, struct etnaviv_iommuv1_context, base);
 }
-static int __etnaviv_iommu_init(struct etnaviv_iommuv1_domain *etnaviv_domain)
+static void etnaviv_iommuv1_free(struct etnaviv_iommu_context *context)
 {
-	u32 *p;
-	int i;
-	etnaviv_domain->base.bad_page_cpu =
-		dma_alloc_wc(etnaviv_domain->base.dev, SZ_4K,
-			     &etnaviv_domain->base.bad_page_dma,
-			     GFP_KERNEL);
-	if (!etnaviv_domain->base.bad_page_cpu)
-		return -ENOMEM;
-	p = etnaviv_domain->base.bad_page_cpu;
-	for (i = 0; i < SZ_4K / 4; i++)
-		*p++ = 0xdead55aa;
-	etnaviv_domain->pgtable_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
-						   PT_SIZE,
-						   &etnaviv_domain->pgtable_dma,
-						   GFP_KERNEL);
-	if (!etnaviv_domain->pgtable_cpu) {
-		dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-			    etnaviv_domain->base.bad_page_cpu,
-			    etnaviv_domain->base.bad_page_dma);
-		return -ENOMEM;
-	}
-	memset32(etnaviv_domain->pgtable_cpu, etnaviv_domain->base.bad_page_dma,
-		 PT_ENTRIES);
-	return 0;
-}
-static void etnaviv_iommuv1_domain_free(struct etnaviv_iommu_domain *domain)
-{
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	dma_free_wc(etnaviv_domain->base.dev, PT_SIZE,
-		    etnaviv_domain->pgtable_cpu, etnaviv_domain->pgtable_dma);
-	dma_free_wc(etnaviv_domain->base.dev, SZ_4K,
-		    etnaviv_domain->base.bad_page_cpu,
-		    etnaviv_domain->base.bad_page_dma);
-	kfree(etnaviv_domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+	drm_mm_takedown(&context->mm);
+	dma_free_wc(context->global->dev, PT_SIZE, v1_context->pgtable_cpu,
+		    v1_context->pgtable_dma);
+	context->global->v1.shared_context = NULL;
+	kfree(v1_context);
 }
-static int etnaviv_iommuv1_map(struct etnaviv_iommu_domain *domain,
+static int etnaviv_iommuv1_map(struct etnaviv_iommu_context *context,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size, int prot)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain = to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 	if (size != SZ_4K)
		return -EINVAL;
-	etnaviv_domain->pgtable_cpu[index] = paddr;
+	v1_context->pgtable_cpu[index] = paddr;
 	return 0;
 }
-static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_domain *domain,
+static size_t etnaviv_iommuv1_unmap(struct etnaviv_iommu_context *context,
				    unsigned long iova, size_t size)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	unsigned int index = (iova - GPU_MEM_START) / SZ_4K;
 	if (size != SZ_4K)
		return -EINVAL;
-	etnaviv_domain->pgtable_cpu[index] = etnaviv_domain->base.bad_page_dma;
+	v1_context->pgtable_cpu[index] = context->global->bad_page_dma;
 	return SZ_4K;
 }
-static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_domain *domain)
+static size_t etnaviv_iommuv1_dump_size(struct etnaviv_iommu_context *context)
 {
 	return PT_SIZE;
 }
-static void etnaviv_iommuv1_dump(struct etnaviv_iommu_domain *domain, void *buf)
+static void etnaviv_iommuv1_dump(struct etnaviv_iommu_context *context,
+				 void *buf)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(domain);
-	memcpy(buf, etnaviv_domain->pgtable_cpu, PT_SIZE);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
+	memcpy(buf, v1_context->pgtable_cpu, PT_SIZE);
 }
-void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
+static void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu,
+				    struct etnaviv_iommu_context *context)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain =
-			to_etnaviv_domain(gpu->mmu->domain);
+	struct etnaviv_iommuv1_context *v1_context = to_v1_context(context);
 	u32 pgtable;
 	/* set base addresses */
@@ -136,7 +100,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MEMORY_BASE_ADDR_PE, gpu->memory_base);
 	/* set page table address in MC */
-	pgtable = (u32)etnaviv_domain->pgtable_dma;
+	pgtable = (u32)v1_context->pgtable_dma;
 	gpu_write(gpu, VIVS_MC_MMU_FE_PAGE_TABLE, pgtable);
 	gpu_write(gpu, VIVS_MC_MMU_TX_PAGE_TABLE, pgtable);
@@ -145,39 +109,62 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
 	gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
 }
-static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
-	.free = etnaviv_iommuv1_domain_free,
+const struct etnaviv_iommu_ops etnaviv_iommuv1_ops = {
+	.free = etnaviv_iommuv1_free,
 	.map = etnaviv_iommuv1_map,
 	.unmap = etnaviv_iommuv1_unmap,
 	.dump_size = etnaviv_iommuv1_dump_size,
 	.dump = etnaviv_iommuv1_dump,
+	.restore = etnaviv_iommuv1_restore,
 };
-struct etnaviv_iommu_domain *
-etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu)
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global)
 {
-	struct etnaviv_iommuv1_domain *etnaviv_domain;
-	struct etnaviv_iommu_domain *domain;
-	int ret;
+	struct etnaviv_iommuv1_context *v1_context;
+	struct etnaviv_iommu_context *context;
+	mutex_lock(&global->lock);
+	/*
+	 * MMUv1 does not support switching between different contexts without
+	 * a stop the world operation, so we only support a single shared
+	 * context with this version.
+	 */
+	if (global->v1.shared_context) {
+		context = global->v1.shared_context;
+		etnaviv_iommu_context_get(context);
+		mutex_unlock(&global->lock);
+		return context;
+	}
-	etnaviv_domain = kzalloc(sizeof(*etnaviv_domain), GFP_KERNEL);
-	if (!etnaviv_domain)
+	v1_context = kzalloc(sizeof(*v1_context), GFP_KERNEL);
+	if (!v1_context)
		return NULL;
-	domain = &etnaviv_domain->base;
-	domain->dev = gpu->dev;
-	domain->base = GPU_MEM_START;
-	domain->size = PT_ENTRIES * SZ_4K;
-	domain->ops = &etnaviv_iommuv1_ops;
-	ret = __etnaviv_iommu_init(etnaviv_domain);
-	if (ret)
-		goto out_free;
+	v1_context->pgtable_cpu = dma_alloc_wc(global->dev, PT_SIZE,
+					       &v1_context->pgtable_dma,
+					       GFP_KERNEL);
+	if (!v1_context->pgtable_cpu)
+		goto out_free;
+	memset32(v1_context->pgtable_cpu, global->bad_page_dma, PT_ENTRIES);
+	context = &v1_context->base;
+	context->global = global;
+	kref_init(&context->refcount);
+	mutex_init(&context->lock);
+	INIT_LIST_HEAD(&context->mappings);
+	drm_mm_init(&context->mm, GPU_MEM_START, PT_ENTRIES * SZ_4K);
+	context->global->v1.shared_context = context;
+	mutex_unlock(&global->lock);
-	return &etnaviv_domain->base;
+	return context;
 out_free:
-	kfree(etnaviv_domain);
+	mutex_unlock(&global->lock);
+	kfree(v1_context);
 	return NULL;
 }
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014-2018 Etnaviv Project
*/
#ifndef __ETNAVIV_IOMMU_H__
#define __ETNAVIV_IOMMU_H__
struct etnaviv_gpu;
struct etnaviv_iommu_domain;
struct etnaviv_iommu_domain *
etnaviv_iommuv1_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu);
struct etnaviv_iommu_domain *
etnaviv_iommuv2_domain_alloc(struct etnaviv_gpu *gpu);
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu);
#endif /* __ETNAVIV_IOMMU_H__ */
@@ -16,33 +16,58 @@ enum etnaviv_iommu_version {
 struct etnaviv_gpu;
 struct etnaviv_vram_mapping;
-struct etnaviv_iommu_domain;
+struct etnaviv_iommu_global;
+struct etnaviv_iommu_context;
-struct etnaviv_iommu_domain_ops {
-	void (*free)(struct etnaviv_iommu_domain *);
-	int (*map)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+struct etnaviv_iommu_ops {
+	struct etnaviv_iommu_context *(*init)(struct etnaviv_iommu_global *);
+	void (*free)(struct etnaviv_iommu_context *);
+	int (*map)(struct etnaviv_iommu_context *context, unsigned long iova,
		   phys_addr_t paddr, size_t size, int prot);
-	size_t (*unmap)(struct etnaviv_iommu_domain *domain, unsigned long iova,
+	size_t (*unmap)(struct etnaviv_iommu_context *context, unsigned long iova,
			size_t size);
-	size_t (*dump_size)(struct etnaviv_iommu_domain *);
-	void (*dump)(struct etnaviv_iommu_domain *, void *);
+	size_t (*dump_size)(struct etnaviv_iommu_context *);
+	void (*dump)(struct etnaviv_iommu_context *, void *);
+	void (*restore)(struct etnaviv_gpu *, struct etnaviv_iommu_context *);
 };
-struct etnaviv_iommu_domain {
+extern const struct etnaviv_iommu_ops etnaviv_iommuv1_ops;
+extern const struct etnaviv_iommu_ops etnaviv_iommuv2_ops;
+#define ETNAVIV_PTA_SIZE SZ_4K
+#define ETNAVIV_PTA_ENTRIES (ETNAVIV_PTA_SIZE / sizeof(u64))
+struct etnaviv_iommu_global {
 	struct device *dev;
+	enum etnaviv_iommu_version version;
+	const struct etnaviv_iommu_ops *ops;
+	unsigned int use;
+	struct mutex lock;
 	void *bad_page_cpu;
 	dma_addr_t bad_page_dma;
-	u64 base;
-	u64 size;
-	const struct etnaviv_iommu_domain_ops *ops;
+	/*
+	 * This union holds members needed by either MMUv1 or MMUv2, which
+	 * can not exist at the same time.
+	 */
+	union {
+		struct {
+			struct etnaviv_iommu_context *shared_context;
+		} v1;
+		struct {
+			/* P(age) T(able) A(rray) */
+			u64 *pta_cpu;
+			dma_addr_t pta_dma;
+			struct spinlock pta_lock;
+			DECLARE_BITMAP(pta_alloc, ETNAVIV_PTA_ENTRIES);
+		} v2;
+	};
 };
-struct etnaviv_iommu {
-	struct etnaviv_gpu *gpu;
-	struct etnaviv_iommu_domain *domain;
-	enum etnaviv_iommu_version version;
+struct etnaviv_iommu_context {
+	struct kref refcount;
+	struct etnaviv_iommu_global *global;
 	/* memory manager for GPU address area */
 	struct mutex lock;
@@ -51,26 +76,40 @@ struct etnaviv_iommu {
 	unsigned int flush_seq;
 };
+int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu);
+void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu);
 struct etnaviv_gem_object;
-int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
+int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	  struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	  struct etnaviv_vram_mapping *mapping);
-void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	     struct etnaviv_vram_mapping *mapping);
-int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu *mmu,
+int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *ctx,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size);
-void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu *mmu,
+void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *ctx,
				   struct etnaviv_vram_mapping *mapping);
-size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf);
+size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_dump(struct etnaviv_iommu_context *ctx, void *buf);
-struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu);
-void etnaviv_iommu_destroy(struct etnaviv_iommu *iommu);
-void etnaviv_iommu_restore(struct etnaviv_gpu *gpu);
+struct etnaviv_iommu_context *
+etnaviv_iommu_context_init(struct etnaviv_iommu_global *global);
+static inline void etnaviv_iommu_context_get(struct etnaviv_iommu_context *ctx)
+{
+	kref_get(&ctx->refcount);
+}
+void etnaviv_iommu_context_put(struct etnaviv_iommu_context *ctx);
+void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
+			   struct etnaviv_iommu_context *ctx);
+struct etnaviv_iommu_context *
+etnaviv_iommuv1_context_alloc(struct etnaviv_iommu_global *global);
+struct etnaviv_iommu_context *
+etnaviv_iommuv2_context_alloc(struct etnaviv_iommu_global *global);
 #endif /* __ETNAVIV_MMU_H__ */