Commit 4202dd9f authored by Matthew Auld, committed by Rodrigo Vivi

drm/xe/migrate: fix MI_ARB_ON_OFF usage

The spec says: "This is a privileged command; it will not be effective (will
be converted to a no-op) if executed from within a non-privileged batch
buffer." However, here it looks like we are just emitting it inside some
bb that was jumped to via the ppGTT, which should be considered
a non-privileged address space.

It looks like we just need some way of preventing things like the
emit_pte() and the later copy/clear from being preempted in between, so
instead just emit the arbitration disable directly in the ring for
migration jobs.

Bspec: 45716
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 7eea3fb6
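
The last hunk below is where the fix lands: the arbitration disable moves out
of the ppGTT batch buffer and into the ring, which is a privileged command
stream, so the hardware actually honours it. A minimal standalone sketch of
that ordering follows; the MI_* encodings and the simplified batch-buffer-start
emission are illustrative assumptions, not the xe driver's actual definitions
or its emit_bb_start() helper.

    #include <stdint.h>

    /*
     * Illustrative MI command encodings (assumed: MI opcode in bits 28:23);
     * the real definitions live in the xe/i915 instruction headers.
     */
    #define MI_INSTR(op)            ((uint32_t)(op) << 23)
    #define MI_ARB_ON_OFF           MI_INSTR(0x08)
    #define MI_ARB_DISABLE          0x0   /* bit 0 clear: arbitration off */
    #define MI_BATCH_BUFFER_START   MI_INSTR(0x31)

    /*
     * Emit the privileged part of a migration job directly into the ring:
     * disable arbitration first, then jump to the (non-privileged) ppGTT
     * batch buffer holding emit_pte() and the copy/clear.  Because the
     * MI_ARB_ON_OFF sits in the ring rather than in the batch, it is not
     * demoted to a no-op, and the ring ops re-enable arbitration after
     * the batch completes.
     */
    static uint32_t emit_migration_ring_sketch(uint32_t *dw, uint32_t i,
                                               uint64_t batch_addr)
    {
            dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;      /* enabled again later */
            dw[i++] = MI_BATCH_BUFFER_START;               /* simplified: the real */
            dw[i++] = (uint32_t)(batch_addr & 0xffffffffu);/* command also carries */
            dw[i++] = (uint32_t)(batch_addr >> 32);        /* length/level flags   */
            return i;
    }

The batch-buffer contents are unchanged; only the arbitration toggle needs the
privileged ring, which is why the diff deletes emit_arb_clear() entirely.
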
@@ -406,12 +406,6 @@ struct xe_migrate *xe_migrate_init(struct xe_tile *tile)
 	return m;
 }
 
-static void emit_arb_clear(struct xe_bb *bb)
-{
-	/* 1 dword */
-	bb->cs[bb->len++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
-}
-
 static u64 xe_migrate_res_sizes(struct xe_res_cursor *cur)
 {
 	/*
@@ -745,10 +739,6 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
 		goto err_sync;
 	}
 
-	/* Preemption is enabled again by the ring ops. */
-	if (!src_is_vram || !dst_is_vram)
-		emit_arb_clear(bb);
-
 	if (!src_is_vram)
 		emit_pte(m, bb, src_L0_pt, src_is_vram, &src_it, src_L0,
 			 src_bo);
@@ -994,7 +984,6 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
 
 	/* Preemption is enabled again by the ring ops. */
 	if (!clear_vram) {
-		emit_arb_clear(bb);
 		emit_pte(m, bb, clear_L0_pt, clear_vram, &src_it, clear_L0,
 			 bo);
 	} else {
@@ -1285,9 +1274,6 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 			VM_SA_UPDATE_UNIT_SIZE;
 	}
 
-	/* Preemption is enabled again by the ring ops. */
-	emit_arb_clear(bb);
-
 	/* Map our PT's to gtt */
 	bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(num_updates);
 	bb->cs[bb->len++] = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
@@ -1316,8 +1302,6 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
 		update_idx = bb->len;
 
-		/* Preemption is enabled again by the ring ops. */
-		emit_arb_clear(bb);
 		for (i = 0; i < num_updates; i++)
 			write_pgtable(tile, bb, 0, &updates[i], pt_update);
 	}
...
@@ -355,6 +355,8 @@ static void emit_migration_job_gen12(struct xe_sched_job *job,
 	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
 				seqno, dw, i);
 
+	dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE; /* Enabled again below */
+
 	i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);
 
 	/* XXX: Do we need this? Leaving for now. */
...