Commit e79a7417 authored by Matthew Auld's avatar Matthew Auld Committed by Rodrigo Vivi

drm/i915/ttm: fix CCS handling

Crucible + recent Mesa seems to sometimes hit:

GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER)

And it looks like we can also trigger this with gem_lmem_swapping, if we
modify the test to use slightly larger object sizes.

Looking closer it looks like we have the following issues in
migrate_copy():

  - We are using plain integer in various places, which we can easily
    overflow with a large object.

  - We pass the entire object size (when the src is lmem) into
    emit_pte() and then try to copy it, which doesn't work, since we
    only have a few fixed sized windows in which to map the pages and
    perform the copy. With an object > 8M we therefore aren't properly
    copying the pages. And then with an object > 64M we trigger the
    GEM_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER).

So it looks like our copy handling for any object > 8M (which is our
CHUNK_SZ) is currently broken on DG2.

Fixes: da0595ae ("drm/i915/migrate: Evict and restore the flatccs capable lmem obj")
Testcase: igt@gem_lmem_swapping
Signed-off-by: default avatarMatthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Reviewed-by: Ramalingam C <ramalingam.c@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20220805132240.442747-2-matthew.auld@intel.com
(cherry picked from commit 8676145e)
Signed-off-by: default avatarRodrigo Vivi <rodrigo.vivi@intel.com>
parent 58091b49
...@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq, ...@@ -638,9 +638,9 @@ static int emit_copy(struct i915_request *rq,
return 0; return 0;
} }
static int scatter_list_length(struct scatterlist *sg) static u64 scatter_list_length(struct scatterlist *sg)
{ {
int len = 0; u64 len = 0;
while (sg && sg_dma_len(sg)) { while (sg && sg_dma_len(sg)) {
len += sg_dma_len(sg); len += sg_dma_len(sg);
...@@ -650,28 +650,26 @@ static int scatter_list_length(struct scatterlist *sg) ...@@ -650,28 +650,26 @@ static int scatter_list_length(struct scatterlist *sg)
return len; return len;
} }
/*
 * calculate_chunk_sz - size of the source window to map for one copy pass
 * @i915: device private (currently unused; kept for symmetry with callers)
 * @src_is_lmem: true if the source pages live in local memory
 * @bytes_to_cpy: remaining main-memory bytes still to be copied
 * @ccs_bytes_to_cpy: remaining CCS (compression control surface) bytes
 *
 * Returns the number of source bytes to map via emit_pte() for the next
 * chunk of the copy, never exceeding the fixed CHUNK_SZ window.
 */
static int
calculate_chunk_sz(struct drm_i915_private *i915, bool src_is_lmem,
		   u64 bytes_to_cpy, u64 ccs_bytes_to_cpy)
{
	if (ccs_bytes_to_cpy && !src_is_lmem)
		/*
		 * When CHUNK_SZ is passed, all pages up to CHUNK_SZ are
		 * taken for the blit. On Flat-CCS capable platforms the
		 * smem object will have more pages than are needed for
		 * main memory, so limit the window to the remaining
		 * main-memory size.
		 */
		return min_t(u64, bytes_to_cpy, CHUNK_SZ);
	else
		/* CCS handling is not required, or src is lmem */
		return CHUNK_SZ;
}
static void get_ccs_sg_sgt(struct sgt_dma *it, u32 bytes_to_cpy) static void get_ccs_sg_sgt(struct sgt_dma *it, u64 bytes_to_cpy)
{ {
u32 len; u64 len;
do { do {
GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg)); GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
...@@ -702,12 +700,12 @@ intel_context_migrate_copy(struct intel_context *ce, ...@@ -702,12 +700,12 @@ intel_context_migrate_copy(struct intel_context *ce,
{ {
struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs; struct sgt_dma it_src = sg_sgt(src), it_dst = sg_sgt(dst), it_ccs;
struct drm_i915_private *i915 = ce->engine->i915; struct drm_i915_private *i915 = ce->engine->i915;
u32 ccs_bytes_to_cpy = 0, bytes_to_cpy; u64 ccs_bytes_to_cpy = 0, bytes_to_cpy;
enum i915_cache_level ccs_cache_level; enum i915_cache_level ccs_cache_level;
u32 src_offset, dst_offset; u32 src_offset, dst_offset;
u8 src_access, dst_access; u8 src_access, dst_access;
struct i915_request *rq; struct i915_request *rq;
int src_sz, dst_sz; u64 src_sz, dst_sz;
bool ccs_is_src, overwrite_ccs; bool ccs_is_src, overwrite_ccs;
int err; int err;
...@@ -790,8 +788,8 @@ intel_context_migrate_copy(struct intel_context *ce, ...@@ -790,8 +788,8 @@ intel_context_migrate_copy(struct intel_context *ce,
if (err) if (err)
goto out_rq; goto out_rq;
calculate_chunk_sz(i915, src_is_lmem, &src_sz, src_sz = calculate_chunk_sz(i915, src_is_lmem,
bytes_to_cpy, ccs_bytes_to_cpy); bytes_to_cpy, ccs_bytes_to_cpy);
len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem, len = emit_pte(rq, &it_src, src_cache_level, src_is_lmem,
src_offset, src_sz); src_offset, src_sz);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment