Commit 8eb7fcce authored by Matthew Auld

drm/i915/migrate: don't check the scratch page

The scratch page might not be allocated in LMEM(like on DG2), so instead
of using that as the deciding factor for where the paging structures
live, let's just query the pt before mapping it.
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Reviewed-by: Ramalingam C <ramalingam.c@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20211206112539.3149779-1-matthew.auld@intel.com
parent 491fe469
@@ -13,7 +13,6 @@
 struct insert_pte_data {
 	u64 offset;
-	bool is_lmem;
 };

 #define CHUNK_SZ SZ_8M /* ~1ms at 8GiB/s preemption delay */
@@ -40,7 +39,7 @@ static void insert_pte(struct i915_address_space *vm,
 	struct insert_pte_data *d = data;

 	vm->insert_page(vm, px_dma(pt), d->offset, I915_CACHE_NONE,
-			d->is_lmem ? PTE_LM : 0);
+			i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
 	d->offset += PAGE_SIZE;
 }
@@ -134,7 +133,6 @@ static struct i915_address_space *migrate_vm(struct intel_gt *gt)
 			goto err_vm;

 		/* Now allow the GPU to rewrite the PTE via its own ppGTT */
-		d.is_lmem = i915_gem_object_is_lmem(vm->vm.scratch[0]);
 		vm->vm.foreach(&vm->vm, base, base + sz, insert_pte, &d);
 	}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment