Commit 9e952635 authored by Daniele Ceraolo Spurio, committed by Rodrigo Vivi

drm/xe: standardize vm-less kernel submissions

The only submission in the driver that currently doesn't use a VM is
the WA setup. We still pass a VM structure (the migration one), but we
don't actually use it at submission time; instead we have a hack to use
GGTT for this particular engine.
Instead of special-casing the WA engine, we can skip providing a VM and
use that as the selector for whether to use GGTT or PPGTT. As part of
this change, we can drop the special engine flag for the WA engine and
switch the WA submission to the standard job functions instead of
dedicated ones.
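
A minimal sketch of the resulting selector (the function body matches
the xe_ring_ops.c hunk below; the BIT(8) comment is an assumption about
the address-space indicator bit in the batch-start encoding):

        /*
         * A queue created without a VM runs its batches out of the GGTT,
         * so per-job decisions can key off q->vm directly instead of a
         * dedicated EXEC_QUEUE_FLAG_WA flag.
         */
        static u32 get_ppgtt_flag(struct xe_sched_job *job)
        {
                /* assumed: BIT(8) selects PPGTT addressing, 0 selects GGTT */
                return job->q->vm ? BIT(8) : 0;
        }

Any path that previously tested EXEC_QUEUE_FLAG_WA can make the same
q->vm check, which is what the hunks below do.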

v2: rebased on s/engine/exec_queue
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230822173334.1664332-4-daniele.ceraolospurio@intel.com
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 923e4238
@@ -73,16 +73,6 @@ __xe_bb_create_job(struct xe_exec_queue *q, struct xe_bb *bb, u64 *addr)
         return xe_sched_job_create(q, addr);
 }
 
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
-                                         struct xe_bb *bb, u64 batch_base_ofs)
-{
-        u64 addr = batch_base_ofs + drm_suballoc_soffset(bb->bo);
-
-        XE_WARN_ON(!(q->vm->flags & XE_VM_FLAG_MIGRATION));
-
-        return __xe_bb_create_job(q, bb, &addr);
-}
-
 struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
                                                 struct xe_bb *bb,
                                                 u64 batch_base_ofs,
...
@@ -20,8 +20,6 @@ struct xe_sched_job *xe_bb_create_job(struct xe_exec_queue *q,
 struct xe_sched_job *xe_bb_create_migration_job(struct xe_exec_queue *q,
                                                 struct xe_bb *bb, u64 batch_ofs,
                                                 u32 second_idx);
-struct xe_sched_job *xe_bb_create_wa_job(struct xe_exec_queue *q,
-                                         struct xe_bb *bb, u64 batch_ofs);
 void xe_bb_free(struct xe_bb *bb, struct dma_fence *fence);
 
 #endif
...
@@ -95,7 +95,7 @@ static struct xe_exec_queue *__xe_exec_queue_create(struct xe_device *xe,
          * can perform GuC CT actions when needed. Caller is expected to
          * have already grabbed the rpm ref outside any sensitive locks.
          */
-        if (q->flags & EXEC_QUEUE_FLAG_VM)
+        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
                 drm_WARN_ON(&xe->drm, !xe_device_mem_access_get_if_ongoing(xe));
 
         return q;
@@ -174,7 +174,7 @@ void xe_exec_queue_fini(struct xe_exec_queue *q)
                 xe_lrc_finish(q->lrc + i);
         if (q->vm)
                 xe_vm_put(q->vm);
-        if (q->flags & EXEC_QUEUE_FLAG_VM)
+        if (!(q->flags & EXEC_QUEUE_FLAG_PERMANENT) && (q->flags & EXEC_QUEUE_FLAG_VM))
                 xe_device_mem_access_put(gt_to_xe(q->gt));
 
         kfree(q);
...
@@ -79,8 +79,6 @@ struct xe_exec_queue {
 #define EXEC_QUEUE_FLAG_VM                      BIT(5)
 /* child of VM queue for multi-tile VM jobs */
 #define EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD       BIT(6)
-/* queue used for WA setup */
-#define EXEC_QUEUE_FLAG_WA                      BIT(7)
 
         /**
          * @flags: flags for this exec queue, should statically setup aside from ban
...
@@ -87,15 +87,13 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_exec_queue *q)
         struct xe_sched_job *job;
         struct xe_bb *bb;
         struct dma_fence *fence;
-        u64 batch_ofs;
         long timeout;
 
         bb = xe_bb_new(gt, 4, false);
         if (IS_ERR(bb))
                 return PTR_ERR(bb);
 
-        batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-        job = xe_bb_create_wa_job(q, bb, batch_ofs);
+        job = xe_bb_create_job(q, bb);
         if (IS_ERR(job)) {
                 xe_bb_free(bb, NULL);
                 return PTR_ERR(job);
@@ -124,7 +122,6 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
         struct xe_sched_job *job;
         struct xe_bb *bb;
         struct dma_fence *fence;
-        u64 batch_ofs;
         long timeout;
         int count = 0;
 
@@ -143,8 +140,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
                 }
         }
 
-        batch_ofs = xe_bo_ggtt_addr(gt_to_tile(gt)->mem.kernel_bb_pool->bo);
-        job = xe_bb_create_wa_job(q, bb, batch_ofs);
+        job = xe_bb_create_job(q, bb);
         if (IS_ERR(job)) {
                 xe_bb_free(bb, NULL);
                 return PTR_ERR(job);
@@ -168,14 +164,12 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_exec_queue *q)
 int xe_gt_record_default_lrcs(struct xe_gt *gt)
 {
         struct xe_device *xe = gt_to_xe(gt);
-        struct xe_tile *tile = gt_to_tile(gt);
         struct xe_hw_engine *hwe;
         enum xe_hw_engine_id id;
         int err = 0;
 
         for_each_hw_engine(hwe, gt, id) {
                 struct xe_exec_queue *q, *nop_q;
-                struct xe_vm *vm;
                 void *default_lrc;
 
                 if (gt->default_lrc[hwe->class])
@@ -192,14 +186,13 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
                 if (!default_lrc)
                         return -ENOMEM;
 
-                vm = xe_migrate_get_vm(tile->migrate);
-                q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance), 1,
-                                         hwe, EXEC_QUEUE_FLAG_WA);
+                q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance), 1,
+                                         hwe, EXEC_QUEUE_FLAG_KERNEL);
                 if (IS_ERR(q)) {
                         err = PTR_ERR(q);
                         xe_gt_err(gt, "hwe %s: xe_exec_queue_create failed (%pe)\n",
                                   hwe->name, q);
-                        goto put_vm;
+                        return err;
                 }
 
                 /* Prime golden LRC with known good state */
@@ -210,8 +203,8 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
                         goto put_exec_queue;
                 }
 
-                nop_q = xe_exec_queue_create(xe, vm, BIT(hwe->logical_instance),
-                                             1, hwe, EXEC_QUEUE_FLAG_WA);
+                nop_q = xe_exec_queue_create(xe, NULL, BIT(hwe->logical_instance),
+                                             1, hwe, EXEC_QUEUE_FLAG_KERNEL);
                 if (IS_ERR(nop_q)) {
                         err = PTR_ERR(nop_q);
                         xe_gt_err(gt, "hwe %s: nop xe_exec_queue_create failed (%pe)\n",
@@ -245,8 +238,6 @@ int xe_gt_record_default_lrcs(struct xe_gt *gt)
                 xe_exec_queue_put(nop_q);
 put_exec_queue:
                 xe_exec_queue_put(q);
-put_vm:
-                xe_vm_put(vm);
                 if (err)
                         break;
         }
...
@@ -202,7 +202,7 @@ static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
 
 static u32 get_ppgtt_flag(struct xe_sched_job *job)
 {
-        return !(job->q->flags & EXEC_QUEUE_FLAG_WA) ? BIT(8) : 0;
+        return job->q->vm ? BIT(8) : 0;
 }
 
 /* for engines that don't require any special HW handling (no EUs, no aux inval, etc) */
...
@@ -59,8 +59,7 @@ static struct xe_sched_job *job_alloc(bool parallel)
 
 bool xe_sched_job_is_migration(struct xe_exec_queue *q)
 {
-        return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION) &&
-                !(q->flags & EXEC_QUEUE_FLAG_WA);
+        return q->vm && (q->vm->flags & XE_VM_FLAG_MIGRATION);
 }
 
 static void job_free(struct xe_sched_job *job)
@@ -91,8 +90,7 @@ struct xe_sched_job *xe_sched_job_create(struct xe_exec_queue *q,
         XE_WARN_ON(!q->vm && !(q->flags & EXEC_QUEUE_FLAG_KERNEL));
 
         /* Migration and kernel engines have their own locking */
-        if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM |
-                          EXEC_QUEUE_FLAG_WA))) {
+        if (!(q->flags & (EXEC_QUEUE_FLAG_KERNEL | EXEC_QUEUE_FLAG_VM))) {
                 lockdep_assert_held(&q->vm->lock);
                 if (!xe_vm_no_dma_fences(q->vm))
                         xe_vm_assert_held(q->vm);
...