Commit ee8efa80 authored by Chris Wilson

drm/i915: Check domains for userptr on release

When we return pages to the system, we release control over them and
should defensively return them to the CPU write domain so that we catch
any external writes on reacquiring them (e.g. after a transparent
swapout/swapin cycle). While we did this defensive clflushing for
ordinary shmem pages, it was forgotten for userptr. Fortunately, userptr
objects are normally cache coherent and so are unaffected by the lapse
in domain tracking.

References: a679f58d ("drm/i915: Flush pages on acquisition")
References: 754a2544 ("drm/i915: Skip object locking around a no-op set-domain ioctl")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Matthew Auld <matthew.william.auld@gmail.com>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190331094620.15185-1-chris@chris-wilson.co.uk
parent cde5f7ed
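
For context before the diff: the shmem release helper that this patch exports and
reuses behaved roughly as below at the time. This is a paraphrased sketch from the
i915_gem.c of that era, not part of the patch itself; treat the exact guards as
approximate.

	/* Sketch (approximate) of the helper this patch exports for userptr.
	 * On release, flush any CPU-incoherent cachelines, then drop the
	 * object back to the CPU write domain so that a later reacquire
	 * assumes the pages may have been written behind our back. */
	void
	__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
					struct sg_table *pages,
					bool needs_clflush)
	{
		GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

		/* A DONTNEED object can be dropped without writeback. */
		if (obj->mm.madv == I915_MADV_DONTNEED)
			obj->mm.dirty = false;

		/* Flush CPU-incoherent cachelines before releasing the pages... */
		if (needs_clflush &&
		    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
		    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
			drm_clflush_sg(pages);

		/* ...and mark the object as back in the CPU write domain, so
		 * external writes are caught when the pages are reacquired. */
		__start_cpu_write(obj);
	}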
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -308,7 +308,7 @@ static void __start_cpu_write(struct drm_i915_gem_object *obj)
 		obj->cache_dirty = true;
 }
 
-static void
+void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
 				struct sg_table *pages,
 				bool needs_clflush)
@@ -2202,7 +2202,6 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
 	struct page *page;
 
 	__i915_gem_object_release_shmem(obj, pages, true);
-
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
--- a/drivers/gpu/drm/i915/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/i915_gem_object.h
@@ -502,4 +502,8 @@ void i915_gem_object_set_cache_coherency(struct drm_i915_gem_object *obj,
 					 unsigned int cache_level);
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj);
 
+void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
+				     struct sg_table *pages,
+				     bool needs_clflush);
+
 #endif
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -673,9 +673,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 	if (!pages)
 		return;
 
-	if (obj->mm.madv != I915_MADV_WILLNEED)
-		obj->mm.dirty = false;
-
+	__i915_gem_object_release_shmem(obj, pages, true);
 	i915_gem_gtt_finish_pages(obj, pages);
 
 	for_each_sgt_page(page, sgt_iter, pages) {
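
Taken together, the userptr release path after this patch mirrors the shmem one. An
abridged reconstruction follows; the declarations and the per-page loop body are
sketched from the surrounding code of the era, not quoted from the hunk above.

	static void
	i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
				   struct sg_table *pages)
	{
		struct sgt_iter sgt_iter;
		struct page *page;

		if (!pages)
			return;

		/* New in this patch: flush and return to the CPU write domain,
		 * exactly as the shmem path does, before handing pages back. */
		__i915_gem_object_release_shmem(obj, pages, true);
		i915_gem_gtt_finish_pages(obj, pages);

		for_each_sgt_page(page, sgt_iter, pages) {
			if (obj->mm.dirty)
				set_page_dirty(page);
			mark_page_accessed(page);
			put_page(page);
		}
		obj->mm.dirty = false;

		sg_free_table(pages);
		kfree(pages);
	}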