Commit e814389f authored by Matthew Auld, committed by Rodrigo Vivi

drm/xe: directly use pat_index for pte_encode

In a future patch userspace will be able to set the pat_index directly as
part of vm_bind. To support this we need to move away from using
xe_cache_level in the low-level routines and instead pass the pat_index
through directly.
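
Concretely, the cache-level-to-PAT lookup moves out of the encode helpers and
into their callers. A minimal sketch of the calling convention before and
after (xe->pat.idx[] is the per-platform table mapping each xe_cache_level to
a hardware PAT index):

	/* Before: helpers resolved the PAT index internally. */
	pte = vm->pt_ops->pte_encode_bo(bo, offset, XE_CACHE_WB, pt_level);

	/* After: callers resolve the index once and pass it through,
	 * so a later vm_bind uAPI can supply a user-chosen value. */
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	pte = vm->pt_ops->pte_encode_bo(bo, offset, pat_index, pt_level);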

v2: Rebase
v3: Some missed conversions, also prefer tile_to_xe() (Niranjana)
v4: remove leftover const (Lucas)
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Pallavi Mishra <pallavi.mishra@intel.com>
Cc: Lucas De Marchi <lucas.demarchi@intel.com>
Cc: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: José Roberto de Souza <jose.souza@intel.com>
Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Reviewed-by: Pallavi Mishra <pallavi.mishra@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 406be3cc
@@ -322,7 +322,7 @@ static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test)
 	/* First part of the test, are we updating our pagetable bo with a new entry? */
 	xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64,
 		  0xdeaddeadbeefbeef);
-	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, XE_CACHE_WB, 0);
+	expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0);
 	if (m->q->vm->flags & XE_VM_FLAG_64K)
 		expected |= XE_PTE_PS64;
 	if (xe_bo_is_vram(pt))
@@ -27,7 +27,7 @@
 #define GUC_GGTT_TOP 0xFEE00000
 
 static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
-				   enum xe_cache_level cache)
+				   u16 pat_index)
 {
 	u64 pte;
@@ -41,13 +41,12 @@ static u64 xelp_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 }
 
 static u64 xelpg_ggtt_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
-				    enum xe_cache_level cache)
+				    u16 pat_index)
 {
 	struct xe_device *xe = xe_bo_device(bo);
-	u32 pat_index = xe->pat.idx[cache];
 	u64 pte;
 
-	pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, cache);
+	pte = xelp_ggtt_pte_encode_bo(bo, bo_offset, pat_index);
 
 	xe_assert(xe, pat_index <= 3);
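
The assert reflects that the xelpg GGTT PTE format only has room for a 2-bit
PAT field, so only indices 0-3 are representable there. The body elided from
this rendering spreads those two bits into individual PTE flags, roughly along
these lines (a sketch; the flag names are assumptions):

	if (pat_index & BIT(0))
		pte |= XELPG_GGTT_PTE_PAT0;	/* flag name assumed */
	if (pat_index & BIT(1))
		pte |= XELPG_GGTT_PTE_PAT1;	/* flag name assumed */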
@@ -79,6 +78,7 @@ void xe_ggtt_set_pte(struct xe_ggtt *ggtt, u64 addr, u64 pte)
 static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
 {
+	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
 	u64 end = start + size - 1;
 	u64 scratch_pte;
@@ -86,7 +86,7 @@ static void xe_ggtt_clear(struct xe_ggtt *ggtt, u64 start, u64 size)
 	if (ggtt->scratch)
 		scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0,
-							  XE_CACHE_WB);
+							  pat_index);
 	else
 		scratch_pte = 0;
@@ -285,9 +285,10 @@ void xe_ggtt_invalidate(struct xe_ggtt *ggtt)
 void xe_ggtt_printk(struct xe_ggtt *ggtt, const char *prefix)
 {
+	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
 	u64 addr, scratch_pte;
 
-	scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, XE_CACHE_WB);
+	scratch_pte = ggtt->pt_ops->pte_encode_bo(ggtt->scratch, 0, pat_index);
 
 	printk("%sGlobal GTT:", prefix);
 	for (addr = 0; addr < ggtt->size; addr += XE_PAGE_SIZE) {
@@ -324,11 +325,12 @@ int xe_ggtt_insert_special_node(struct xe_ggtt *ggtt, struct drm_mm_node *node,
 void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
 {
+	u16 pat_index = tile_to_xe(ggtt->tile)->pat.idx[XE_CACHE_WB];
 	u64 start = bo->ggtt_node.start;
 	u64 offset, pte;
 
 	for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) {
-		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, XE_CACHE_WB);
+		pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index);
 		xe_ggtt_set_pte(ggtt, start + offset, pte);
 	}
@@ -14,8 +14,7 @@ struct xe_bo;
 struct xe_gt;
 
 struct xe_ggtt_pt_ops {
-	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
-			     enum xe_cache_level cache);
+	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset, u16 pat_index);
};
 
 struct xe_ggtt {
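
The u16 pat_index taken by this op comes from the device's PAT table. A
hedged sketch of the shape of that table (the idx[] field is real, but the
values here are illustrative only; the actual per-platform tables are
programmed in the driver's PAT setup code):

	/* Illustrative values only; actual indices vary per platform. */
	static void example_pat_table_init(struct xe_device *xe)
	{
		xe->pat.idx[XE_CACHE_NONE] = 3;
		xe->pat.idx[XE_CACHE_WT]   = 2;
		xe->pat.idx[XE_CACHE_WB]   = 0;
	}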
@@ -163,6 +163,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 				 struct xe_vm *vm)
 {
 	struct xe_device *xe = tile_to_xe(tile);
+	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 	u8 id = tile->id;
 	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
 	u32 map_ofs, level, i;
@@ -194,7 +195,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		return ret;
 	}
 
-	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, XE_CACHE_WB);
+	entry = vm->pt_ops->pde_encode_bo(bo, bo->size - XE_PAGE_SIZE, pat_index);
 	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);
 
 	map_ofs = (num_entries - num_level) * XE_PAGE_SIZE;
@@ -202,7 +203,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Map the entire BO in our level 0 pt */
 	for (i = 0, level = 0; i < num_entries; level++) {
 		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
-						  XE_CACHE_WB, 0);
+						  pat_index, 0);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);
@@ -221,7 +222,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
 		     XE_PAGE_SIZE) {
 			entry = vm->pt_ops->pte_encode_bo(batch, i,
-							  XE_CACHE_WB, 0);
+							  pat_index, 0);
 
 			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
 				  entry);
@@ -246,7 +247,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 			flags = XE_PDE_64K;
 
 		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
-						  XE_PAGE_SIZE, XE_CACHE_WB);
+						  XE_PAGE_SIZE, pat_index);
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
 			  entry | flags);
 	}
@@ -254,7 +255,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Write PDE's that point to our BO. */
 	for (i = 0; i < num_entries - num_level; i++) {
 		entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
-						  XE_CACHE_WB);
+						  pat_index);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
 			  (i + 1) * 8, u64, entry);
@@ -266,7 +267,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		level = 2;
 		ofs = map_ofs + XE_PAGE_SIZE * level + 256 * 8;
-		flags = vm->pt_ops->pte_encode_addr(xe, 0, XE_CACHE_WB, level,
+		flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
 						    true, 0);
 
 	/*
@@ -464,6 +465,7 @@ static void emit_pte(struct xe_migrate *m,
 		     struct xe_res_cursor *cur,
 		     u32 size, struct xe_bo *bo)
 {
+	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
 	u32 ptes;
 	u64 ofs = at_pt * XE_PAGE_SIZE;
 	u64 cur_ofs;
@@ -507,7 +509,7 @@ static void emit_pte(struct xe_migrate *m,
 			}
 
 			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
-								 addr, XE_CACHE_WB,
+								 addr, pat_index,
 								 0, devmem, flags);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
@@ -1226,6 +1228,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 	bool first_munmap_rebind = vma &&
 		vma->gpuva.flags & XE_VMA_FIRST_REBIND;
 	struct xe_exec_queue *q_override = !q ? m->q : q;
+	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
 
 	/* Use the CPU if no in syncs and engine is idle */
 	if (no_in_syncs(syncs, num_syncs) && xe_exec_queue_is_idle(q_override)) {
@@ -1297,7 +1300,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
 			xe_tile_assert(tile, pt_bo->size == SZ_4K);
 
-			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, XE_CACHE_WB, 0);
+			addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, pat_index, 0);
 			bb->cs[bb->len++] = lower_32_bits(addr);
 			bb->cs[bb->len++] = upper_32_bits(addr);
 		}
@@ -50,6 +50,7 @@ static struct xe_pt *xe_pt_entry(struct xe_pt_dir *pt_dir, unsigned int index)
 static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
 			     unsigned int level)
 {
+	u16 pat_index = tile_to_xe(tile)->pat.idx[XE_CACHE_WB];
 	u8 id = tile->id;
 
 	if (!vm->scratch_bo[id])
@@ -57,9 +58,9 @@ static u64 __xe_pt_empty_pte(struct xe_tile *tile, struct xe_vm *vm,
 	if (level > 0)
 		return vm->pt_ops->pde_encode_bo(vm->scratch_pt[id][level - 1]->bo,
-						 0, XE_CACHE_WB);
+						 0, pat_index);
 
-	return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, XE_CACHE_WB, 0);
+	return vm->pt_ops->pte_encode_bo(vm->scratch_bo[id], 0, pat_index, 0);
 }
 
 /**
@@ -510,6 +511,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 {
 	struct xe_pt_stage_bind_walk *xe_walk =
 		container_of(walk, typeof(*xe_walk), base);
+	u16 pat_index = tile_to_xe(xe_walk->tile)->pat.idx[xe_walk->cache];
 	struct xe_pt *xe_parent = container_of(parent, typeof(*xe_parent), base);
 	struct xe_vm *vm = xe_walk->vm;
 	struct xe_pt *xe_child;
@@ -526,7 +528,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 		pte = vm->pt_ops->pte_encode_vma(is_null ? 0 :
 						 xe_res_dma(curs) + xe_walk->dma_offset,
-						 xe_walk->vma, xe_walk->cache, level);
+						 xe_walk->vma, pat_index, level);
 		pte |= xe_walk->default_pte;
 
 	/*
@@ -591,8 +593,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
 			xe_child->is_compact = true;
 		}
 
-		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0,
-						xe_walk->cache) | flags;
+		pte = vm->pt_ops->pde_encode_bo(xe_child->bo, 0, pat_index) | flags;
 		ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child,
 					 pte);
 	}
@@ -38,14 +38,14 @@ struct xe_pt {
 struct xe_pt_ops {
 	u64 (*pte_encode_bo)(struct xe_bo *bo, u64 bo_offset,
-			     enum xe_cache_level cache, u32 pt_level);
+			     u16 pat_index, u32 pt_level);
 	u64 (*pte_encode_vma)(u64 pte, struct xe_vma *vma,
-			      enum xe_cache_level cache, u32 pt_level);
+			      u16 pat_index, u32 pt_level);
 	u64 (*pte_encode_addr)(struct xe_device *xe, u64 addr,
-			       enum xe_cache_level cache,
+			       u16 pat_index,
 			       u32 pt_level, bool devmem, u64 flags);
 	u64 (*pde_encode_bo)(struct xe_bo *bo, u64 bo_offset,
-			     const enum xe_cache_level cache);
+			     u16 pat_index);
};
 
 struct xe_pt_entry {
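
With every op keyed on a raw u16, a future vm_bind path only has to validate
a userspace-supplied index before handing it to the page-table walker. A
hypothetical sketch (the helper name and the n_entries bound are assumptions,
not part of this patch):

	/* Hypothetical: reject PAT indices outside the platform table. */
	static int example_check_pat_index(struct xe_device *xe, u16 pat_index)
	{
		if (pat_index >= xe->pat.n_entries)	/* field name assumed */
			return -EINVAL;
		return 0;
	}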
@@ -1211,9 +1211,8 @@ static struct drm_gpuvm_ops gpuvm_ops = {
 	.vm_free = xe_vm_free,
};
 
-static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index)
 {
-	u32 pat_index = xe->pat.idx[cache];
 	u64 pte = 0;
 
 	if (pat_index & BIT(0))
@@ -1225,9 +1224,8 @@ static u64 pde_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
 	return pte;
 }
 
-static u64 pte_encode_cache(struct xe_device *xe, enum xe_cache_level cache)
+static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
 {
-	u32 pat_index = xe->pat.idx[cache];
 	u64 pte = 0;
 
 	if (pat_index & BIT(0))
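
Both helpers scatter the low bits of pat_index across the non-contiguous PAT
bits of the descriptor; only the first comparison survives in this rendering.
The pattern is of this shape (a sketch; the XE_PPGTT_PTE_PAT* flag names and
which bits each helper handles are assumptions):

	static u64 example_pte_encode_pat_index(struct xe_device *xe, u16 pat_index)
	{
		u64 pte = 0;

		if (pat_index & BIT(0))
			pte |= XE_PPGTT_PTE_PAT0;
		if (pat_index & BIT(1))
			pte |= XE_PPGTT_PTE_PAT1;
		if (pat_index & BIT(2))
			pte |= XE_PPGTT_PTE_PAT2;	/* PTEs carry a third PAT bit */

		return pte;
	}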
@@ -1261,27 +1259,27 @@ static u64 pte_encode_ps(u32 pt_level)
 }
 
 static u64 xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset,
-			      const enum xe_cache_level cache)
+			      const u16 pat_index)
 {
 	struct xe_device *xe = xe_bo_device(bo);
 	u64 pde;
 
 	pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
 	pde |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pde |= pde_encode_cache(xe, cache);
+	pde |= pde_encode_pat_index(xe, pat_index);
 
 	return pde;
 }
 
 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
-			      enum xe_cache_level cache, u32 pt_level)
+			      u16 pat_index, u32 pt_level)
 {
 	struct xe_device *xe = xe_bo_device(bo);
 	u64 pte;
 
 	pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE);
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pte |= pte_encode_cache(xe, cache);
+	pte |= pte_encode_pat_index(xe, pat_index);
 	pte |= pte_encode_ps(pt_level);
 
 	if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo))
@@ -1291,7 +1289,7 @@ static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset,
 }
 
 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
-			       enum xe_cache_level cache, u32 pt_level)
+			       u16 pat_index, u32 pt_level)
 {
 	struct xe_device *xe = xe_vma_vm(vma)->xe;
@@ -1300,7 +1298,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 	if (likely(!xe_vma_read_only(vma)))
 		pte |= XE_PAGE_RW;
 
-	pte |= pte_encode_cache(xe, cache);
+	pte |= pte_encode_pat_index(xe, pat_index);
 	pte |= pte_encode_ps(pt_level);
 
 	if (unlikely(xe_vma_is_null(vma)))
@@ -1310,7 +1308,7 @@ static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma,
 }
 
 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
-				enum xe_cache_level cache,
+				u16 pat_index,
 				u32 pt_level, bool devmem, u64 flags)
 {
 	u64 pte;
@@ -1320,7 +1318,7 @@ static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr,
 	pte = addr;
 	pte |= XE_PAGE_PRESENT | XE_PAGE_RW;
-	pte |= pte_encode_cache(xe, cache);
+	pte |= pte_encode_pat_index(xe, pat_index);
 	pte |= pte_encode_ps(pt_level);
 
 	if (devmem)
@@ -1707,7 +1705,7 @@ struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id)
 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile)
 {
 	return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0,
-					 XE_CACHE_WB);
+					 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]);
 }
 
 static struct dma_fence *