Commit a4d86249 authored by Chris Wilson's avatar Chris Wilson

drm/i915/gt: Provide a utility to create a scratch buffer

Primarily used by selftests, but also by runtime debugging of engine
w/a, is a routine to create a temporarily bound buffer for readback.
Amalgamate the duplicated routines into one.
Suggested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201219020343.22681-2-chris@chris-wilson.co.uk
parent a0d3fdb6
...@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore) ...@@ -422,6 +422,35 @@ void setup_private_pat(struct intel_uncore *uncore)
bdw_setup_private_ppat(uncore); bdw_setup_private_ppat(uncore);
} }
/*
 * __vm_create_scratch_for_read - create a temporarily bound buffer for readback
 * @vm: address space (GGTT or ppGTT) to bind the scratch buffer into
 * @size: minimum usable size in bytes; rounded up to whole pages
 *
 * Allocates an internal GEM object, marks it CPU-cache coherent so the CPU
 * can read back values the GPU writes into it, and pins it into @vm.
 *
 * Returns the pinned vma on success, or an ERR_PTR() on failure.  The caller
 * owns the object reference carried by the vma and releases it (callers in
 * this series use i915_vma_unpin_and_release()).
 */
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size)
{
struct drm_i915_gem_object *obj;
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_internal(vm->i915, PAGE_ALIGN(size));
if (IS_ERR(obj))
return ERR_CAST(obj);
/* Readback path: keep the pages coherent with the CPU caches. */
i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
vma = i915_vma_instance(obj, vm, NULL);
if (IS_ERR(vma)) {
/* vma lookup failed; drop our sole reference to the object. */
i915_gem_object_put(obj);
return vma;
}
/* A GGTT vma needs a global binding; a ppGTT vma a user binding. */
err = i915_vma_pin(vma, 0, 0,
i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
if (err) {
/* i915_vma_put() drops the object reference held via the vma. */
i915_vma_put(vma);
return ERR_PTR(err);
}
return vma;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c" #include "selftests/mock_gtt.c"
#endif #endif
...@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm, ...@@ -573,6 +573,9 @@ int i915_vm_pin_pt_stash(struct i915_address_space *vm,
void i915_vm_free_pt_stash(struct i915_address_space *vm, void i915_vm_free_pt_stash(struct i915_address_space *vm,
struct i915_vm_pt_stash *stash); struct i915_vm_pt_stash *stash);
struct i915_vma *
__vm_create_scratch_for_read(struct i915_address_space *vm, unsigned long size);
static inline struct sgt_dma { static inline struct sgt_dma {
struct scatterlist *sg; struct scatterlist *sg;
dma_addr_t dma, max; dma_addr_t dma, max;
......
...@@ -2086,39 +2086,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine) ...@@ -2086,39 +2086,6 @@ void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
wa_list_apply(engine->uncore, &engine->wa_list); wa_list_apply(engine->uncore, &engine->wa_list);
} }
/*
 * Allocate a pinned scratch buffer large enough for @count u32 results,
 * bound into @vm (globally for the GGTT, as a user binding otherwise).
 * Returns the pinned vma or an ERR_PTR() on failure.
 */
static struct i915_vma *
create_scratch(struct i915_address_space *vm, int count)
{
	struct drm_i915_gem_object *scratch;
	struct i915_vma *vma;
	unsigned int sz;
	int ret;

	sz = round_up(count * sizeof(u32), PAGE_SIZE);

	scratch = i915_gem_object_create_internal(vm->i915, sz);
	if (IS_ERR(scratch))
		return ERR_CAST(scratch);

	/* The CPU reads the results back, so keep the caches coherent. */
	i915_gem_object_set_cache_coherency(scratch, I915_CACHE_LLC);

	vma = i915_vma_instance(scratch, vm, NULL);
	if (IS_ERR(vma)) {
		/* Drop our sole object reference and propagate the error. */
		i915_gem_object_put(scratch);
		return vma;
	}

	ret = i915_vma_pin(vma, 0, 0,
			   i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
	if (ret) {
		i915_gem_object_put(scratch);
		return ERR_PTR(ret);
	}

	return vma;
}
struct mcr_range { struct mcr_range {
u32 start; u32 start;
u32 end; u32 end;
...@@ -2221,7 +2188,8 @@ static int engine_wa_list_verify(struct intel_context *ce, ...@@ -2221,7 +2188,8 @@ static int engine_wa_list_verify(struct intel_context *ce,
if (!wal->count) if (!wal->count)
return 0; return 0;
vma = create_scratch(&ce->engine->gt->ggtt->vm, wal->count); vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
wal->count * sizeof(u32));
if (IS_ERR(vma)) if (IS_ERR(vma))
return PTR_ERR(vma); return PTR_ERR(vma);
......
...@@ -25,33 +25,6 @@ ...@@ -25,33 +25,6 @@
#define NUM_GPR 16 #define NUM_GPR 16
#define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */ #define NUM_GPR_DW (NUM_GPR * 2) /* each GPR is 2 dwords */
/*
 * Create a single page of GGTT-bound, CPU-coherent scratch memory for the
 * selftest to read GPU-written results from.  Returns the pinned vma or an
 * ERR_PTR() on failure.
 */
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int err;

	obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	/* Results are read back on the CPU; keep the caches coherent. */
	i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma)) {
		err = PTR_ERR(vma);
		goto err_obj;
	}

	err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (err)
		goto err_obj;

	return vma;

err_obj:
	/* Single unwind point: release our only reference to the object. */
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
static bool is_active(struct i915_request *rq) static bool is_active(struct i915_request *rq)
{ {
if (i915_request_is_active(rq)) if (i915_request_is_active(rq))
...@@ -4167,7 +4140,8 @@ static int preserved_virtual_engine(struct intel_gt *gt, ...@@ -4167,7 +4140,8 @@ static int preserved_virtual_engine(struct intel_gt *gt,
int err = 0; int err = 0;
u32 *cs; u32 *cs;
scratch = create_scratch(siblings[0]->gt); scratch = __vm_create_scratch_for_read(&siblings[0]->gt->ggtt->vm,
PAGE_SIZE);
if (IS_ERR(scratch)) if (IS_ERR(scratch))
return PTR_ERR(scratch); return PTR_ERR(scratch);
......
...@@ -27,29 +27,7 @@ ...@@ -27,29 +27,7 @@
static struct i915_vma *create_scratch(struct intel_gt *gt) static struct i915_vma *create_scratch(struct intel_gt *gt)
{ {
struct drm_i915_gem_object *obj; return __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
struct i915_vma *vma;
int err;
obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
if (IS_ERR(obj))
return ERR_CAST(obj);
i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED);
vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
if (IS_ERR(vma)) {
i915_gem_object_put(obj);
return vma;
}
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
if (err) {
i915_gem_object_put(obj);
return ERR_PTR(err);
}
return vma;
} }
static bool is_active(struct i915_request *rq) static bool is_active(struct i915_request *rq)
......
...@@ -57,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) ...@@ -57,33 +57,6 @@ static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin)
return err; return err;
} }
/* Allocate one page of GGTT-pinned scratch memory for MOCS result readback. */
static struct i915_vma *create_scratch(struct intel_gt *gt)
{
	struct i915_vma *vma;
	struct drm_i915_gem_object *scratch_obj;
	int ret;

	scratch_obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE);
	if (IS_ERR(scratch_obj))
		return ERR_CAST(scratch_obj);

	/* Keep the backing store coherent with the CPU caches for readback. */
	i915_gem_object_set_cache_coherency(scratch_obj, I915_CACHING_CACHED);

	vma = i915_vma_instance(scratch_obj, &gt->ggtt->vm, NULL);
	if (IS_ERR(vma))
		goto err_put;

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_put;
	}

	return vma;

err_put:
	/* Drop our sole object reference; vma already holds the error code. */
	i915_gem_object_put(scratch_obj);
	return vma;
}
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{ {
struct drm_i915_mocs_table table; struct drm_i915_mocs_table table;
...@@ -102,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt) ...@@ -102,7 +75,7 @@ static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS)) if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
arg->mocs = table; arg->mocs = table;
arg->scratch = create_scratch(gt); arg->scratch = __vm_create_scratch_for_read(&gt->ggtt->vm, PAGE_SIZE);
if (IS_ERR(arg->scratch)) if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch); return PTR_ERR(arg->scratch);
......
...@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce) ...@@ -486,10 +486,11 @@ static int check_dirty_whitelist(struct intel_context *ce)
struct intel_engine_cs *engine = ce->engine; struct intel_engine_cs *engine = ce->engine;
struct i915_vma *scratch; struct i915_vma *scratch;
struct i915_vma *batch; struct i915_vma *batch;
int err = 0, i, v; int err = 0, i, v, sz;
u32 *cs, *results; u32 *cs, *results;
scratch = create_scratch(ce->vm, 2 * ARRAY_SIZE(values) + 1); sz = (2 * ARRAY_SIZE(values) + 1) * sizeof(u32);
scratch = __vm_create_scratch_for_read(ce->vm, sz);
if (IS_ERR(scratch)) if (IS_ERR(scratch))
return PTR_ERR(scratch); return PTR_ERR(scratch);
...@@ -1028,13 +1029,15 @@ static int live_isolated_whitelist(void *arg) ...@@ -1028,13 +1029,15 @@ static int live_isolated_whitelist(void *arg)
return 0; return 0;
for (i = 0; i < ARRAY_SIZE(client); i++) { for (i = 0; i < ARRAY_SIZE(client); i++) {
client[i].scratch[0] = create_scratch(gt->vm, 1024); client[i].scratch[0] =
__vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[0])) { if (IS_ERR(client[i].scratch[0])) {
err = PTR_ERR(client[i].scratch[0]); err = PTR_ERR(client[i].scratch[0]);
goto err; goto err;
} }
client[i].scratch[1] = create_scratch(gt->vm, 1024); client[i].scratch[1] =
__vm_create_scratch_for_read(gt->vm, 4096);
if (IS_ERR(client[i].scratch[1])) { if (IS_ERR(client[i].scratch[1])) {
err = PTR_ERR(client[i].scratch[1]); err = PTR_ERR(client[i].scratch[1]);
i915_vma_unpin_and_release(&client[i].scratch[0], 0); i915_vma_unpin_and_release(&client[i].scratch[0], 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment