Commit 65e51e30 authored by Steven Price, committed by Rob Herring

drm/panfrost: Prevent race when handling page fault

When handling a GPU page fault, addr_to_drm_mm_node() is used to
translate the GPU address to a buffer object. However, it is possible for
the buffer object to be freed after the function has returned, resulting
in a use-after-free of the BO.

Change addr_to_drm_mm_node to return the panfrost_gem_object with an
extra reference on it, preventing the BO from being freed until after
the page fault has been handled.
Signed-off-by: Steven Price <steven.price@arm.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190913160310.50444-1-steven.price@arm.com
parent d18a9662
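
The fix follows the usual lookup-then-reference pattern: the object must gain a reference while the lock that keeps it findable is still held; only then is it safe to drop the lock and hand the pointer to the caller. Below is a minimal, self-contained userspace sketch of that pattern, not the panfrost code itself; the names (struct obj, table_lock, lookup_obj_racy, lookup_obj_get, obj_put) are illustrative assumptions, and the real driver takes the reference with drm_gem_object_get() on the BO and drops it with drm_gem_object_put_unlocked().

/*
 * Minimal sketch of the locking/refcounting pattern the patch adopts.
 * When a lookup structure is protected by a lock but object lifetime is
 * controlled by a reference count, the lookup must take a reference
 * *before* dropping the lock; otherwise a concurrent free leaves the
 * caller holding a dangling pointer.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
        atomic_int refcount;            /* object is freed when this hits zero */
        unsigned long start, size;      /* address range the object backs */
        struct obj *next;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *table;               /* live objects, protected by table_lock */

static void obj_put(struct obj *o)
{
        /* Drop a reference; free the object when the last one goes away. */
        if (atomic_fetch_sub(&o->refcount, 1) == 1)
                free(o);
}

/*
 * Buggy shape of the original lookup: the pointer is returned without a
 * reference, so the object may already have been freed by the time the
 * caller dereferences it (the use-after-free the commit message describes).
 */
static struct obj *lookup_obj_racy(unsigned long addr)
{
        struct obj *o, *found = NULL;

        pthread_mutex_lock(&table_lock);
        for (o = table; o; o = o->next) {
                if (addr >= o->start && addr < o->start + o->size) {
                        found = o;
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return found;                   /* no reference held: unsafe */
}

/*
 * Fixed shape: take the extra reference while table_lock still pins the
 * object. The caller now owns a reference and must drop it with obj_put()
 * on every exit path once it is done with the object.
 */
static struct obj *lookup_obj_get(unsigned long addr)
{
        struct obj *o, *found = NULL;

        pthread_mutex_lock(&table_lock);
        for (o = table; o; o = o->next) {
                if (addr >= o->start && addr < o->start + o->size) {
                        atomic_fetch_add(&o->refcount, 1);
                        found = o;
                        break;
                }
        }
        pthread_mutex_unlock(&table_lock);
        return found;
}

In the patch below, panfrost_mmu_map_fault_addr() correspondingly gains an err_bo label so that the reference taken by addr_to_drm_mm_node() is dropped with drm_gem_object_put_unlocked() on every exit path.
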
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -386,28 +386,40 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv)
         free_io_pgtable_ops(mmu->pgtbl_ops);
 }
 
-static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
+static struct panfrost_gem_object *
+addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr)
 {
-        struct drm_mm_node *node = NULL;
+        struct panfrost_gem_object *bo = NULL;
+        struct panfrost_file_priv *priv;
+        struct drm_mm_node *node;
         u64 offset = addr >> PAGE_SHIFT;
         struct panfrost_mmu *mmu;
 
         spin_lock(&pfdev->as_lock);
         list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
-                struct panfrost_file_priv *priv;
-                if (as != mmu->as)
-                        continue;
+                if (as == mmu->as)
+                        break;
+        }
+        if (as != mmu->as)
+                goto out;
 
-                priv = container_of(mmu, struct panfrost_file_priv, mmu);
-                drm_mm_for_each_node(node, &priv->mm) {
-                        if (offset >= node->start && offset < (node->start + node->size))
-                                goto out;
+        priv = container_of(mmu, struct panfrost_file_priv, mmu);
+
+        spin_lock(&priv->mm_lock);
+
+        drm_mm_for_each_node(node, &priv->mm) {
+                if (offset >= node->start &&
+                    offset < (node->start + node->size)) {
+                        bo = drm_mm_node_to_panfrost_bo(node);
+                        drm_gem_object_get(&bo->base.base);
+                        break;
                 }
         }
 
+        spin_unlock(&priv->mm_lock);
 out:
         spin_unlock(&pfdev->as_lock);
-        return node;
+        return bo;
 }
 
 #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)
@@ -415,29 +427,28 @@ static struct drm_mm_node *addr_to_drm_mm_node(struct panfrost_device *pfdev, in
 int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
 {
         int ret, i;
-        struct drm_mm_node *node;
         struct panfrost_gem_object *bo;
         struct address_space *mapping;
         pgoff_t page_offset;
         struct sg_table *sgt;
         struct page **pages;
 
-        node = addr_to_drm_mm_node(pfdev, as, addr);
-        if (!node)
+        bo = addr_to_drm_mm_node(pfdev, as, addr);
+        if (!bo)
                 return -ENOENT;
 
-        bo = drm_mm_node_to_panfrost_bo(node);
         if (!bo->is_heap) {
                 dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
-                         node->start << PAGE_SHIFT);
-                return -EINVAL;
+                         bo->node.start << PAGE_SHIFT);
+                ret = -EINVAL;
+                goto err_bo;
         }
         WARN_ON(bo->mmu->as != as);
 
         /* Assume 2MB alignment and size multiple */
         addr &= ~((u64)SZ_2M - 1);
         page_offset = addr >> PAGE_SHIFT;
-        page_offset -= node->start;
+        page_offset -= bo->node.start;
 
         mutex_lock(&bo->base.pages_lock);
 
@@ -446,7 +457,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
                                      sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
                 if (!bo->sgts) {
                         mutex_unlock(&bo->base.pages_lock);
-                        return -ENOMEM;
+                        ret = -ENOMEM;
+                        goto err_bo;
                 }
 
                 pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
@@ -455,7 +467,8 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
                         kfree(bo->sgts);
                         bo->sgts = NULL;
                         mutex_unlock(&bo->base.pages_lock);
-                        return -ENOMEM;
+                        ret = -ENOMEM;
+                        goto err_bo;
                 }
                 bo->base.pages = pages;
                 bo->base.pages_use_count = 1;
@@ -493,12 +506,16 @@ int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr)
 
         dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr);
 
+        drm_gem_object_put_unlocked(&bo->base.base);
+
         return 0;
 
 err_map:
         sg_free_table(sgt);
 err_pages:
         drm_gem_shmem_put_pages(&bo->base);
+err_bo:
+        drm_gem_object_put_unlocked(&bo->base.base);
         return ret;
 }