Commit eeb52ee6 authored by Matthew Auld, committed by Chris Wilson

drm/i915: clear the shadow batch

The shadow batch is an internal object whose backing pages are never
cleared on allocation, and since batch_len can be smaller than the
object, we should take care to clear the unused remainder ourselves.

Testcase: igt/gen9_exec_parse/shadow-peek
Fixes: 4f7af194 ("drm/i915: Support ro ppgtt mapped cmdparser shadow buffers")
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20201224151358.401345-1-matthew.auld@intel.com
Cc: stable@vger.kernel.org
parent 177b7a52
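In short, copy_batch() fills the shadow batch object with only batch_len bytes of the user batch; whatever the uncleared backing pages happened to contain beyond that point was left visible through the shadow mapping. A minimal userspace C sketch of the idea behind the fix (the names and the plain memcpy/memset are illustrative, not the driver code):

#include <stddef.h>
#include <string.h>

/*
 * "shadow" stands in for the shadow batch object, which can be larger
 * than the batch copied into it. Copy the batch, then zero the unused
 * tail so no stale page contents survive in [batch_len, shadow_size).
 */
static void fill_shadow(void *shadow, size_t shadow_size,
                        const void *batch, size_t batch_len)
{
        memcpy(shadow, batch, batch_len);
        memset((char *)shadow + batch_len, 0, shadow_size - batch_len);
}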
@@ -1167,7 +1167,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                 }
         }

         if (IS_ERR(src)) {
-                unsigned long x, n;
+                unsigned long x, n, remain;
                 void *ptr;

                 /*
@@ -1178,14 +1178,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                  * We don't care about copying too much here as we only
                  * validate up to the end of the batch.
                  */
+                remain = length;
                 if (!(dst_obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
-                        length = round_up(length,
+                        remain = round_up(remain,
                                           boot_cpu_data.x86_clflush_size);

                 ptr = dst;
                 x = offset_in_page(offset);
-                for (n = offset >> PAGE_SHIFT; length; n++) {
-                        int len = min(length, PAGE_SIZE - x);
+                for (n = offset >> PAGE_SHIFT; remain; n++) {
+                        int len = min(remain, PAGE_SIZE - x);

                         src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
                         if (needs_clflush)
@@ -1194,13 +1195,15 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
                         kunmap_atomic(src);

                         ptr += len;
-                        length -= len;
+                        remain -= len;
                         x = 0;
                 }
         }

         i915_gem_object_unpin_pages(src_obj);

+        memset32(dst + length, 0, (dst_obj->base.size - length) / sizeof(u32));
+
         /* dst_obj is returned with vmap pinned */
         return dst;
 }
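A note on the copy_batch() hunks above: the slow kmap path may round the number of bytes it copies up to the CPU cache-line (clflush) size, so the loop now consumes a separate remain counter while the original length is preserved to mark where the real batch ends; the final memset32() then zeroes everything from dst + length to the end of the shadow object. A rough sketch of that tail-clear arithmetic, with a local helper standing in for the kernel's memset32() (store n 32-bit words):

#include <stddef.h>
#include <stdint.h>

/* Stand-in for the kernel's memset32(): store n copies of v. */
static void fill32(uint32_t *p, uint32_t v, size_t n)
{
        while (n--)
                *p++ = v;
}

/*
 * Zero the shadow object past the validated batch; on Intel GPUs a
 * zero dword is MI_NOOP, so the padding is harmless if executed.
 */
static void clear_shadow_tail(void *dst, size_t object_size, size_t length)
{
        fill32((uint32_t *)((char *)dst + length), 0,
               (object_size - length) / sizeof(uint32_t));
}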
@@ -1393,11 +1396,6 @@ static unsigned long *alloc_whitelist(u32 batch_length)

 #define LENGTH_BIAS 2

-static bool shadow_needs_clflush(struct drm_i915_gem_object *obj)
-{
-        return !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE);
-}
-
 /**
  * intel_engine_cmd_parser() - parse a batch buffer for privilege violations
  * @engine: the engine on which the batch is to execute
@@ -1539,16 +1537,9 @@ int intel_engine_cmd_parser(struct intel_engine_cs *engine,
                                 ret = 0; /* allow execution */
                         }
                 }
-                if (shadow_needs_clflush(shadow->obj))
-                        drm_clflush_virt_range(batch_end, 8);
         }

-        if (shadow_needs_clflush(shadow->obj)) {
-                void *ptr = page_mask_bits(shadow->obj->mm.mapping);
-
-                drm_clflush_virt_range(ptr, (void *)(cmd + 1) - ptr);
-        }
+        i915_gem_object_flush_map(shadow->obj);

         if (!IS_ERR_OR_NULL(jump_whitelist))
                 kfree(jump_whitelist);
...