Commit 480ae795 authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915/selftests: Prepare gtt tests for obj->mm.lock removal

We need to lock the global gtt dma_resv; use i915_vm_lock_objects
to handle this correctly. Add ww handling for this where required
(the retry pattern is sketched below the commit metadata).

Add the object lock around unpin/put pages, and use the unlocked
versions of pin_pages and pin_map where required.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-61-maarten.lankhorst@linux.intel.com
parent b91e1b11
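The hunks below all apply one pattern: initialize a ww context, lock the vm's objects with i915_vm_lock_objects, back off and retry on -EDEADLK via i915_gem_ww_ctx_backoff, then finish the context. A minimal sketch of that retry loop follows, using only the calls that already appear in the diff; the helper name and its body are illustrative, not part of the patch:

/* Minimal sketch of the ww retry pattern used in the hunks below. */
static int example_with_vm_locked(struct i915_address_space *vm)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, false);	/* false: not interruptible, as in the selftests below */
retry:
	err = i915_vm_lock_objects(vm, &ww);	/* lock the vm's backing objects under the ww context */
	if (err)
		goto out;

	/* ... work on the page tables / objects while the locks are held ... */

out:
	if (err == -EDEADLK) {
		/* a contended lock was hit: drop our locks and retry in order */
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}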
@@ -130,7 +130,7 @@ fake_dma_object(struct drm_i915_private *i915, u64 size)
 	obj->cache_level = I915_CACHE_NONE;
 
 	/* Preallocate the "backing storage" */
-	if (i915_gem_object_pin_pages(obj))
+	if (i915_gem_object_pin_pages_unlocked(obj))
 		goto err_obj;
 
 	i915_gem_object_unpin_pages(obj);
@@ -146,6 +146,7 @@ static int igt_ppgtt_alloc(void *arg)
 {
 	struct drm_i915_private *dev_priv = arg;
 	struct i915_ppgtt *ppgtt;
+	struct i915_gem_ww_ctx ww;
 	u64 size, last, limit;
 	int err = 0;
 
@@ -171,6 +172,12 @@ static int igt_ppgtt_alloc(void *arg)
 	limit = totalram_pages() << PAGE_SHIFT;
 	limit = min(ppgtt->vm.total, limit);
 
+	i915_gem_ww_ctx_init(&ww, false);
+retry:
+	err = i915_vm_lock_objects(&ppgtt->vm, &ww);
+	if (err)
+		goto err_ppgtt_cleanup;
+
 	/* Check we can allocate the entire range */
 	for (size = 4096; size <= limit; size <<= 2) {
 		struct i915_vm_pt_stash stash = {};
@@ -215,6 +222,13 @@ static int igt_ppgtt_alloc(void *arg)
 	}
 
 err_ppgtt_cleanup:
+	if (err == -EDEADLK) {
+		err = i915_gem_ww_ctx_backoff(&ww);
+		if (!err)
+			goto retry;
+	}
+	i915_gem_ww_ctx_fini(&ww);
+
 	i915_vm_put(&ppgtt->vm);
 	return err;
 }
@@ -276,7 +290,7 @@ static int lowlevel_hole(struct i915_address_space *vm,
 
 		GEM_BUG_ON(obj->base.size != BIT_ULL(size));
 
-		if (i915_gem_object_pin_pages(obj)) {
+		if (i915_gem_object_pin_pages_unlocked(obj)) {
 			i915_gem_object_put(obj);
 			kfree(order);
 			break;
@@ -297,20 +311,36 @@ static int lowlevel_hole(struct i915_address_space *vm,
 
 			if (vm->allocate_va_range) {
 				struct i915_vm_pt_stash stash = {};
+				struct i915_gem_ww_ctx ww;
+				int err;
+
+				i915_gem_ww_ctx_init(&ww, false);
+retry:
+				err = i915_vm_lock_objects(vm, &ww);
+				if (err)
+					goto alloc_vm_end;
 
+				err = -ENOMEM;
 				if (i915_vm_alloc_pt_stash(vm, &stash,
 							   BIT_ULL(size)))
-					break;
+					goto alloc_vm_end;
 
-				if (i915_vm_pin_pt_stash(vm, &stash)) {
-					i915_vm_free_pt_stash(vm, &stash);
-					break;
-				}
-
-				vm->allocate_va_range(vm, &stash,
-						      addr, BIT_ULL(size));
+				err = i915_vm_pin_pt_stash(vm, &stash);
+				if (!err)
+					vm->allocate_va_range(vm, &stash,
+							      addr, BIT_ULL(size));
 
 				i915_vm_free_pt_stash(vm, &stash);
+alloc_vm_end:
+				if (err == -EDEADLK) {
+					err = i915_gem_ww_ctx_backoff(&ww);
+					if (!err)
+						goto retry;
+				}
+				i915_gem_ww_ctx_fini(&ww);
+
+				if (err)
+					break;
 			}
 
 			mock_vma->pages = obj->mm.pages;
@@ -1166,7 +1196,7 @@ static int igt_ggtt_page(void *arg)
 	if (IS_ERR(obj))
 		return PTR_ERR(obj);
 
-	err = i915_gem_object_pin_pages(obj);
+	err = i915_gem_object_pin_pages_unlocked(obj);
 	if (err)
 		goto out_free;
 
@@ -1333,7 +1363,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1385,7 +1415,7 @@ static int igt_gtt_reserve(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1549,7 +1579,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1658,7 +1688,7 @@ static int igt_gtt_insert(void *arg)
 			goto out;
 		}
 
-		err = i915_gem_object_pin_pages(obj);
+		err = i915_gem_object_pin_pages_unlocked(obj);
 		if (err) {
 			i915_gem_object_put(obj);
 			goto out;
@@ -1829,7 +1859,7 @@ static int igt_cs_tlb(void *arg)
 		goto out_vm;
 	}
 
-	batch = i915_gem_object_pin_map(bbe, I915_MAP_WC);
+	batch = i915_gem_object_pin_map_unlocked(bbe, I915_MAP_WC);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_put_bbe;
@@ -1845,7 +1875,7 @@ static int igt_cs_tlb(void *arg)
 	}
 
 	/* Track the execution of each request by writing into different slot */
-	batch = i915_gem_object_pin_map(act, I915_MAP_WC);
+	batch = i915_gem_object_pin_map_unlocked(act, I915_MAP_WC);
 	if (IS_ERR(batch)) {
 		err = PTR_ERR(batch);
 		goto out_put_act;
@@ -1892,7 +1922,7 @@ static int igt_cs_tlb(void *arg)
 		goto out_put_out;
 	GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE);
 
-	result = i915_gem_object_pin_map(out, I915_MAP_WB);
+	result = i915_gem_object_pin_map_unlocked(out, I915_MAP_WB);
 	if (IS_ERR(result)) {
 		err = PTR_ERR(result);
 		goto out_put_out;
@@ -1908,6 +1938,7 @@ static int igt_cs_tlb(void *arg)
 	while (!__igt_timeout(end_time, NULL)) {
 		struct i915_vm_pt_stash stash = {};
 		struct i915_request *rq;
+		struct i915_gem_ww_ctx ww;
 		u64 offset;
 
 		offset = igt_random_offset(&prng,
@@ -1926,19 +1957,30 @@ static int igt_cs_tlb(void *arg)
 		if (err)
 			goto end;
 
+		i915_gem_ww_ctx_init(&ww, false);
+retry:
+		err = i915_vm_lock_objects(vm, &ww);
+		if (err)
+			goto end_ww;
+
 		err = i915_vm_alloc_pt_stash(vm, &stash, chunk_size);
 		if (err)
-			goto end;
+			goto end_ww;
 
 		err = i915_vm_pin_pt_stash(vm, &stash);
-		if (err) {
-			i915_vm_free_pt_stash(vm, &stash);
-			goto end;
-		}
-
-		vm->allocate_va_range(vm, &stash, offset, chunk_size);
+		if (!err)
+			vm->allocate_va_range(vm, &stash, offset, chunk_size);
 
 		i915_vm_free_pt_stash(vm, &stash);
+end_ww:
+		if (err == -EDEADLK) {
+			err = i915_gem_ww_ctx_backoff(&ww);
+			if (!err)
+				goto retry;
+		}
+		i915_gem_ww_ctx_fini(&ww);
+		if (err)
+			goto end;
 
 		/* Prime the TLB with the dummy pages */
 		for (i = 0; i < count; i++) {