Commit 52a42cec authored by Chris Wilson

drm/i915/cmdparser: Accelerate copies from WC memory

If we need to use clflush to prepare our batch for reads from memory, we
can bypass the cache instead by using non-temporal copies.
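
For illustration only (not the code added by this patch), a minimal user-space sketch of such a non-temporal copy, assuming SSE4.1 (MOVNTDQA) and pointers/length that are multiples of 16 bytes; copy_from_wc_nt() is a hypothetical name, not an i915 helper:

#include <immintrin.h>
#include <stddef.h>

/* Hypothetical helper, shown only to illustrate the idea: MOVNTDQA
 * streaming loads bypass the cache hierarchy when reading from WC
 * memory (on ordinary WB memory they behave like regular loads).
 * Assumes dst, src and len are all 16-byte aligned.
 */
static void copy_from_wc_nt(void *dst, const void *src, size_t len)
{
	__m128i *d = dst;
	const __m128i *s = src;

	while (len >= 16) {
		__m128i tmp = _mm_stream_load_si128((__m128i *)s);
		_mm_store_si128(d, tmp);
		d++;
		s++;
		len -= 16;
	}
}

In kernel context the real helper additionally has to bracket the SSE usage with kernel_fpu_begin()/kernel_fpu_end(), and the parser keeps the existing clflush + memcpy loop below as a fallback for unaligned batches or CPUs without SSE4.1.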
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.william.auld@gmail.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-39-chris@chris-wilson.co.uk
parent 76ff480e
@@ -965,8 +965,7 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 {
 	unsigned int src_needs_clflush;
 	unsigned int dst_needs_clflush;
-	void *dst, *ptr;
-	int offset, n;
+	void *dst, *src;
 	int ret;
 
 	ret = i915_gem_obj_prepare_shmem_read(src_obj, &src_needs_clflush);
@@ -983,31 +982,48 @@ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj,
 	if (IS_ERR(dst))
 		goto unpin_dst;
 
-	ptr = dst;
-	offset = offset_in_page(batch_start_offset);
-
-	/* We can avoid clflushing partial cachelines before the write if we
-	 * only every write full cache-lines. Since we know that both the
-	 * source and destination are in multiples of PAGE_SIZE, we can simply
-	 * round up to the next cacheline. We don't care about copying too much
-	 * here as we only validate up to the end of the batch.
-	 */
-	if (dst_needs_clflush & CLFLUSH_BEFORE)
-		batch_len = roundup(batch_len, boot_cpu_data.x86_clflush_size);
-
-	for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
-		int len = min_t(int, batch_len, PAGE_SIZE - offset);
-		void *vaddr;
-
-		vaddr = kmap_atomic(i915_gem_object_get_page(src_obj, n));
-		if (src_needs_clflush)
-			drm_clflush_virt_range(vaddr + offset, len);
-		memcpy(ptr, vaddr + offset, len);
-		kunmap_atomic(vaddr);
-
-		ptr += len;
-		batch_len -= len;
-		offset = 0;
+	src = ERR_PTR(-ENODEV);
+	if (src_needs_clflush &&
+	    i915_memcpy_from_wc((void *)(uintptr_t)batch_start_offset, 0, 0)) {
+		src = i915_gem_object_pin_map(src_obj, I915_MAP_WC);
+		if (!IS_ERR(src)) {
+			i915_memcpy_from_wc(dst,
+					    src + batch_start_offset,
+					    ALIGN(batch_len, 16));
+			i915_gem_object_unpin_map(src_obj);
+		}
+	}
+	if (IS_ERR(src)) {
+		void *ptr;
+		int offset, n;
+
+		offset = offset_in_page(batch_start_offset);
+
+		/* We can avoid clflushing partial cachelines before the write
+		 * if we only every write full cache-lines. Since we know that
+		 * both the source and destination are in multiples of
+		 * PAGE_SIZE, we can simply round up to the next cacheline.
+		 * We don't care about copying too much here as we only
+		 * validate up to the end of the batch.
+		 */
+		if (dst_needs_clflush & CLFLUSH_BEFORE)
+			batch_len = roundup(batch_len,
+					    boot_cpu_data.x86_clflush_size);
+
+		ptr = dst;
+		for (n = batch_start_offset >> PAGE_SHIFT; batch_len; n++) {
+			int len = min_t(int, batch_len, PAGE_SIZE - offset);
+
+			src = kmap_atomic(i915_gem_object_get_page(src_obj, n));
+			if (src_needs_clflush)
+				drm_clflush_virt_range(src + offset, len);
+			memcpy(ptr, src + offset, len);
+			kunmap_atomic(src);
+
+			ptr += len;
+			batch_len -= len;
+			offset = 0;
+		}
 	}
 
 	/* dst_obj is returned with vmap pinned */
...