Commit 0a12a612 authored by Matt Roper, committed by Rodrigo Vivi

drm/xe: Let primary and media GT share a kernel_bb_pool

The media GT requires a valid gt->kernel_bb_pool during driver probe to
allocate the WA and NOOP batchbuffers used to record default context
images.  Dynamically allocate the bb_pools so that the primary and media
GT can use the same pool during driver init.

The media GT still shouldn't need the USM pool, so only hook up the
kernel_bb_pool for now.

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Link: https://lore.kernel.org/r/20230410200229.2726648-1-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 2988cf02
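
In short, the suballocator pools become pointers handed out by xe_sa_bo_manager_init(), and the media GT borrows the primary GT's pool instead of allocating its own. A condensed sketch of the resulting init-time pattern (abridged from the all_fw_domain_init() hunk below; error handling and the USM pool are omitted for brevity):

        /* Condensed sketch, not the literal patch: see the hunk below. */
        if (!xe_gt_is_media_type(gt)) {
                /* Primary GT: allocate its own DRM-managed pool. */
                gt->kernel_bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
                if (IS_ERR(gt->kernel_bb_pool))
                        return PTR_ERR(gt->kernel_bb_pool);
        } else {
                /* Media GT: borrow the pool owned by the primary GT. */
                gt->kernel_bb_pool = xe_find_full_gt(gt)->kernel_bb_pool;
        }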
@@ -42,7 +42,7 @@ struct xe_bb *xe_bb_new(struct xe_gt *gt, u32 dwords, bool usm)
 	 * space to accomodate the platform-specific hardware prefetch
 	 * requirements.
 	 */
-	bb->bo = xe_sa_bo_new(!usm ? &gt->kernel_bb_pool : &gt->usm.bb_pool,
+	bb->bo = xe_sa_bo_new(!usm ? gt->kernel_bb_pool : gt->usm.bb_pool,
 			      4 * (dwords + 1) + bb_prefetch(gt));
 	if (IS_ERR(bb->bo)) {
 		err = PTR_ERR(bb->bo);
...
@@ -137,7 +137,7 @@ static int emit_nop_job(struct xe_gt *gt, struct xe_engine *e)
 	if (IS_ERR(bb))
 		return PTR_ERR(bb);
 
-	batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+	batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
 	job = xe_bb_create_wa_job(e, bb, batch_ofs);
 	if (IS_ERR(job)) {
 		xe_bb_free(bb, NULL);
@@ -186,7 +186,7 @@ static int emit_wa_job(struct xe_gt *gt, struct xe_engine *e)
 		}
 	}
 
-	batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool.bo);
+	batch_ofs = xe_bo_ggtt_addr(gt->kernel_bb_pool->bo);
 	job = xe_bb_create_wa_job(e, bb, batch_ofs);
 	if (IS_ERR(job)) {
 		xe_bb_free(bb, NULL);
@@ -439,27 +439,33 @@ static int all_fw_domain_init(struct xe_gt *gt)
 	if (err)
 		goto err_force_wake;
 
-	/*
-	 * FIXME: This should be ok as SA should only be used by gt->migrate and
-	 * vm->gt->migrate and both should be pointing to a non-media GT. But to
-	 * realy safe, convert gt->kernel_bb_pool to a pointer and point a media
-	 * GT to the kernel_bb_pool on a real tile.
-	 */
 	if (!xe_gt_is_media_type(gt)) {
-		err = xe_sa_bo_manager_init(gt, &gt->kernel_bb_pool, SZ_1M, 16);
-		if (err)
+		gt->kernel_bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+		if (IS_ERR(gt->kernel_bb_pool)) {
+			err = PTR_ERR(gt->kernel_bb_pool);
 			goto err_force_wake;
+		}
 
 		/*
 		 * USM has its only SA pool to non-block behind user operations
 		 */
 		if (gt_to_xe(gt)->info.supports_usm) {
-			err = xe_sa_bo_manager_init(gt, &gt->usm.bb_pool,
-						    SZ_1M, 16);
-			if (err)
+			gt->usm.bb_pool = xe_sa_bo_manager_init(gt, SZ_1M, 16);
+			if (IS_ERR(gt->usm.bb_pool)) {
+				err = PTR_ERR(gt->usm.bb_pool);
 				goto err_force_wake;
+			}
 		}
+	} else {
+		struct xe_gt *full_gt = xe_find_full_gt(gt);
+
+		/*
+		 * Media GT's kernel_bb_pool is only used while recording the
+		 * default context during GT init. The USM pool should never
+		 * be needed on the media GT.
+		 */
+		gt->kernel_bb_pool = full_gt->kernel_bb_pool;
 	}
 
 	if (!xe_gt_is_media_type(gt)) {
 		gt->migrate = xe_migrate_init(gt);
...
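
The hunk above has the media GT simply alias the primary GT's pool rather than take any reference of its own. That is safe because the pool is now a DRM-managed allocation (see the xe_sa_bo_manager_init() changes further down): its lifetime follows the drm_device, not either GT, so nothing double-frees it. A minimal sketch of that pattern, using a hypothetical pool type purely for illustration:

        #include <linux/err.h>
        #include <drm/drm_managed.h>

        /* Hypothetical pool type, only for illustration. */
        struct my_pool {
                void *base;
        };

        /*
         * Memory from drmm_kzalloc() is released automatically when the
         * drm_device is released, so several users may hold the same
         * pointer without any of them owning it.
         */
        static struct my_pool *my_pool_create(struct drm_device *drm)
        {
                struct my_pool *pool = drmm_kzalloc(drm, sizeof(*pool), GFP_KERNEL);

                if (!pool)
                        return ERR_PTR(-ENOMEM);

                return pool;
        }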
@@ -66,8 +66,8 @@ static int sa_info(struct seq_file *m, void *data)
 	struct xe_gt *gt = node_to_gt(m->private);
 	struct drm_printer p = drm_seq_file_printer(m);
 
-	drm_suballoc_dump_debug_info(&gt->kernel_bb_pool.base, &p,
-				     gt->kernel_bb_pool.gpu_addr);
+	drm_suballoc_dump_debug_info(&gt->kernel_bb_pool->base, &p,
+				     gt->kernel_bb_pool->gpu_addr);
 
 	return 0;
 }
...
@@ -214,7 +214,7 @@ struct xe_gt {
 		 * behind any user operations which may have resulted in a
 		 * fault.
 		 */
-		struct xe_sa_manager bb_pool;
+		struct xe_sa_manager *bb_pool;
 		/**
 		 * @reserved_bcs_instance: reserved BCS instance used for USM
 		 * operations (e.g. mmigrations, fixing page tables)
@@ -304,8 +304,12 @@ struct xe_gt {
 	/** @hw_engines: hardware engines on the GT */
 	struct xe_hw_engine hw_engines[XE_NUM_HW_ENGINES];
 
-	/** @kernel_bb_pool: Pool from which batchbuffers are allocated */
-	struct xe_sa_manager kernel_bb_pool;
+	/**
+	 * @kernel_bb_pool: Pool from which batchbuffers are allocated.
+	 *
+	 * Media GT shares a pool with its primary GT.
+	 */
+	struct xe_sa_manager *kernel_bb_pool;
 
 	/** @migrate: Migration helper for vram blits and clearing */
 	struct xe_migrate *migrate;
...
@@ -161,7 +161,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
 	u32 map_ofs, level, i;
 	struct xe_device *xe = gt_to_xe(m->gt);
-	struct xe_bo *bo, *batch = gt->kernel_bb_pool.bo;
+	struct xe_bo *bo, *batch = gt->kernel_bb_pool->bo;
 	u64 entry;
 	int ret;
 
@@ -229,7 +229,7 @@ static int xe_migrate_prepare_vm(struct xe_gt *gt, struct xe_migrate *m,
 	m->batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
 
 	if (xe->info.supports_usm) {
-		batch = gt->usm.bb_pool.bo;
+		batch = gt->usm.bb_pool->bo;
 		batch_addr = xe_bo_addr(batch, 0, GEN8_PAGE_SIZE,
 					&is_vram);
 		m->usm_batch_base_ofs = xe_migrate_vram_ofs(batch_addr);
...
@@ -33,13 +33,18 @@ static void xe_sa_bo_manager_fini(struct drm_device *drm, void *arg)
 	sa_manager->bo = NULL;
 }
 
-int xe_sa_bo_manager_init(struct xe_gt *gt,
-			  struct xe_sa_manager *sa_manager,
-			  u32 size, u32 align)
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align)
 {
 	struct xe_device *xe = gt_to_xe(gt);
 	u32 managed_size = size - SZ_4K;
 	struct xe_bo *bo;
+	int ret;
+
+	struct xe_sa_manager *sa_manager = drmm_kzalloc(&gt_to_xe(gt)->drm,
+							sizeof(*sa_manager),
+							GFP_KERNEL);
+	if (!sa_manager)
+		return ERR_PTR(-ENOMEM);
 
 	sa_manager->bo = NULL;
@@ -49,7 +54,7 @@ int xe_sa_bo_manager_init(struct xe_gt *gt,
 	if (IS_ERR(bo)) {
 		drm_err(&xe->drm, "failed to allocate bo for sa manager: %ld\n",
 			PTR_ERR(bo));
-		return PTR_ERR(bo);
+		return (struct xe_sa_manager *)bo;
 	}
 	sa_manager->bo = bo;
@@ -61,15 +66,19 @@ int xe_sa_bo_manager_init(struct xe_gt *gt,
 		if (!sa_manager->cpu_ptr) {
 			xe_bo_unpin_map_no_vm(sa_manager->bo);
 			sa_manager->bo = NULL;
-			return -ENOMEM;
+			return ERR_PTR(-ENOMEM);
 		}
 	} else {
 		sa_manager->cpu_ptr = bo->vmap.vaddr;
 		memset(sa_manager->cpu_ptr, 0, bo->ttm.base.size);
 	}
 
-	return drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
-					sa_manager);
+	ret = drmm_add_action_or_reset(&xe->drm, xe_sa_bo_manager_fini,
+				       sa_manager);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return sa_manager;
 }
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
...
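
One detail in the xe_sa_bo_manager_init() hunk above: when the BO allocation fails, the ERR_PTR-encoded struct xe_bo pointer is propagated by an explicit cast to the new return type. The kernel's <linux/err.h> provides ERR_CAST() for exactly this situation, so an equivalent generic spelling (not what the patch uses, just the common idiom) would be:

        /* Generic <linux/err.h> idiom, equivalent to the explicit cast above. */
        if (IS_ERR(bo))
                return ERR_CAST(bo);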
@@ -11,9 +11,7 @@ struct dma_fence;
 struct xe_bo;
 struct xe_gt;
 
-int xe_sa_bo_manager_init(struct xe_gt *gt,
-			  struct xe_sa_manager *sa_manager,
-			  u32 size, u32 align);
+struct xe_sa_manager *xe_sa_bo_manager_init(struct xe_gt *gt, u32 size, u32 align);
 
 struct drm_suballoc *xe_sa_bo_new(struct xe_sa_manager *sa_manager,
 				  u32 size);
...