Commit 3e977ac6 authored by Chris Wilson

drm/i915: Prevent writing into a read-only object via a GGTT mmap

If the user has created a read-only object, they should not be allowed
to circumvent the write protection by using a GGTT mmapping. Deny it.
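
For illustration, here is a minimal userspace sketch of the case being rejected. It is not part of the patch: the render-node path and the use of a read-only userptr object (the only uapi here that yields a read-only BO) are assumptions, modelled loosely on the igt test referenced below, and whether a userptr object can be GTT-mapped at all depends on the kernel.

/* Hypothetical sketch: try to map a read-only object writable via the GTT. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed device node */
	void *buf = NULL;

	if (fd < 0 || posix_memalign(&buf, 4096, 4096))
		return 1;

	/* Create a read-only object backed by our pages. */
	struct drm_i915_gem_userptr up = {
		.user_ptr = (uintptr_t)buf,
		.user_size = 4096,
		.flags = I915_USERPTR_READ_ONLY,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &up))
		return 1;

	/* Look up the fake offset used for GGTT mmaps of this object. */
	struct drm_i915_gem_mmap_gtt mmap_arg = { .handle = up.handle };
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg))
		return 1;

	/* With this patch, asking for a writable GGTT mapping must fail. */
	void *ptr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			 fd, mmap_arg.offset);
	printf("writable GTT mmap: %s\n",
	       ptr == MAP_FAILED ? "rejected (expected)" : "allowed (bug)");
	return 0;
}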

Also most machines do not support read-only GGTT PTEs, so again we have
to reject attempted writes. Fortunately, this is known a priori, so we
can at least reject in the call to create the mmap (with a sanity check
in the fault handler).

v2: Check the vma->vm_flags during mmap() to allow readonly access.
v3: Remove VM_MAYWRITE to curtail mprotect()
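
Continuing the hypothetical sketch above (reusing its fd and mmap_arg), the v3 change means a read-only mapping is still permitted, but because VM_MAYWRITE is cleared it can no longer be upgraded later:

	/* A read-only GGTT mapping of the read-only object is still allowed... */
	void *ro = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, mmap_arg.offset);

	/* ...but with VM_MAYWRITE cleared, mprotect() cannot add PROT_WRITE. */
	if (ro != MAP_FAILED &&
	    mprotect(ro, 4096, PROT_READ | PROT_WRITE) == -1)
		perror("mprotect");	/* expected to fail, e.g. with EACCES */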

Testcase: igt/gem_userptr_blits/readonly_mmap*
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Jon Bloomfield <jon.bloomfield@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: David Herrmann <dh.herrmann@gmail.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com> #v1
Reviewed-by: Jon Bloomfield <jon.bloomfield@intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180712185315.3288-4-chris@chris-wilson.co.uk
parent c9e66688
@@ -1036,6 +1036,15 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 		return -EACCES;
 	}
 
+	if (node->readonly) {
+		if (vma->vm_flags & VM_WRITE) {
+			drm_gem_object_put_unlocked(obj);
+			return -EINVAL;
+		}
+
+		vma->vm_flags &= ~VM_MAYWRITE;
+	}
+
 	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
 			       vma);

@@ -2012,6 +2012,10 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf)
 	pgoff_t page_offset;
 	int ret;
 
+	/* Sanity check that we allow writing into this object */
+	if (i915_gem_object_is_readonly(obj) && write)
+		return VM_FAULT_SIGBUS;
+
 	/* We don't use vmf->pgoff since that has the fake offset */
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

@@ -206,7 +206,7 @@ static int ppgtt_bind_vma(struct i915_vma *vma,
 
 	/* Applicable to VLV, and gen8+ */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

@@ -2491,8 +2491,10 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
 	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
 	dma_addr_t addr;
 
-	/* The GTT does not support read-only mappings */
-	GEM_BUG_ON(flags & PTE_READ_ONLY);
+	/*
+	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+	 * not to allow the user to override access to a read only page.
+	 */
 
 	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
 	gtt_entries += vma->node.start >> PAGE_SHIFT;

@@ -2731,7 +2733,7 @@ static int ggtt_bind_vma(struct i915_vma *vma,
 
 	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
 	pte_flags = 0;
-	if (obj->gt_ro)
+	if (i915_gem_object_is_readonly(obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	intel_runtime_pm_get(i915);

@@ -2769,7 +2771,7 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma,
 
 	/* Currently applicable only to VLV */
 	pte_flags = 0;
-	if (vma->obj->gt_ro)
+	if (i915_gem_object_is_readonly(vma->obj))
 		pte_flags |= PTE_READ_ONLY;
 
 	if (flags & I915_VMA_LOCAL_BIND) {

@@ -141,7 +141,6 @@ struct drm_i915_gem_object {
 	 * Is the object to be mapped as read-only to the GPU
 	 * Only honoured if hardware has relevant pte bit
 	 */
-	unsigned long gt_ro:1;
 	unsigned int cache_level:3;
 	unsigned int cache_coherent:2;
 #define I915_BO_CACHE_COHERENT_FOR_READ BIT(0)

@@ -358,6 +357,18 @@ static inline void i915_gem_object_unlock(struct drm_i915_gem_object *obj)
 	reservation_object_unlock(obj->resv);
 }
 
+static inline void
+i915_gem_object_set_readonly(struct drm_i915_gem_object *obj)
+{
+	obj->base.vma_node.readonly = true;
+}
+
+static inline bool
+i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj)
+{
+	return obj->base.vma_node.readonly;
+}
+
 static inline bool
 i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj)
 {

@@ -1100,7 +1100,7 @@ intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
 	 * if supported by the platform's GGTT.
 	 */
 	if (vm->has_read_only)
-		obj->gt_ro = 1;
+		i915_gem_object_set_readonly(obj);
 
 	vma = i915_vma_instance(obj, vm, NULL);
 	if (IS_ERR(vma))

@@ -487,7 +487,8 @@ static int igt_ctx_readonly(void *arg)
 					goto out_unlock;
 				}
 
-				obj->gt_ro = prandom_u32_state(&prng);
+				if (prandom_u32_state(&prng) & 1)
+					i915_gem_object_set_readonly(obj);
 			}
 
 			intel_runtime_pm_get(i915);

@@ -518,7 +519,7 @@ static int igt_ctx_readonly(void *arg)
 			unsigned int num_writes;
 
 			num_writes = rem;
-			if (obj->gt_ro)
+			if (i915_gem_object_is_readonly(obj))
 				num_writes = 0;
 
 			err = cpu_check(obj, num_writes);

@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
 	rwlock_t vm_lock;
 	struct drm_mm_node vm_node;
 	struct rb_root vm_files;
+	bool readonly:1;
 };
 
 struct drm_vma_offset_manager {