Commit fd0975b7 authored by Brian Welty, committed by Rodrigo Vivi

drm/xe: Replace usage of mem_type_to_tile

Currently mem_type_to_tile() is being used to access the tile's underlying
tile.mem.vram. However, this function makes the assumption that a mem_type
will only ever map to a single tile. Now that the TTM vram manager contains
a pointer to the memory_region, make use of this in xe_bo.c.

As such, introduce a helper function res_to_mem_region() to get the
ttm_vram_mgr->vram from the BO's resource, and use this to replace usage
of mem_type_to_tile().

xe_tile is still needed to choose the migration context, so this part is
unchanged. But as this only renames the usage, the function is now renamed
to mem_type_to_migrate().
Signed-off-by: Brian Welty <brian.welty@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 4e11a141
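
The effect of the change is easiest to see at a call site. Below is a minimal before/after sketch (not part of the commit; the variable names are illustrative, the helpers and fields are the ones appearing in the diff) of the pattern being converted, resolving the VRAM region from a BO's TTM resource instead of hopping through a tile:

	/* Before: assumed one tile per VRAM mem_type. */
	struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);
	u64 io_start = tile->mem.vram.io_start;

	/* After: look up the region from the resource via its TTM VRAM manager. */
	struct xe_mem_region *vram = res_to_mem_region(res);
	u64 io_start = vram->io_start;

The same substitution is applied wherever only the memory region is needed; xe_bo_move() still looks up per tile, but now returns the tile's migrate context directly.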
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -101,12 +101,24 @@ static bool xe_bo_is_user(struct xe_bo *bo)
 	return bo->flags & XE_BO_CREATE_USER_BIT;
 }
 
-static struct xe_tile *
-mem_type_to_tile(struct xe_device *xe, u32 mem_type)
+static struct xe_migrate *
+mem_type_to_migrate(struct xe_device *xe, u32 mem_type)
 {
+	struct xe_tile *tile;
+
 	xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));
-
-	return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
+	tile = &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
+	return tile->migrate;
+}
+
+static struct xe_mem_region *res_to_mem_region(struct ttm_resource *res)
+{
+	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
+	struct ttm_resource_manager *mgr;
+
+	xe_assert(xe, resource_is_vram(res));
+	mgr = ttm_manager_type(&xe->ttm, res->mem_type);
+	return to_xe_ttm_vram_mgr(mgr)->vram;
 }
 
 static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
@@ -126,11 +138,13 @@ static void try_add_system(struct xe_bo *bo, struct ttm_place *places,
 static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 		     struct ttm_place *places, u32 bo_flags, u32 mem_type, u32 *c)
 {
-	struct xe_tile *tile = mem_type_to_tile(xe, mem_type);
 	struct ttm_place place = { .mem_type = mem_type };
-	u64 io_size = tile->mem.vram.io_size;
+	struct xe_mem_region *vram;
+	u64 io_size;
 
-	xe_assert(xe, tile->mem.vram.usable_size);
+	vram = to_xe_ttm_vram_mgr(ttm_manager_type(&xe->ttm, mem_type))->vram;
+	xe_assert(xe, vram && vram->usable_size);
+	io_size = vram->io_size;
 
 	/*
 	 * For eviction / restore on suspend / resume objects
@@ -140,7 +154,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
 			 XE_BO_CREATE_GGTT_BIT))
 		place.flags |= TTM_PL_FLAG_CONTIGUOUS;
 
-	if (io_size < tile->mem.vram.usable_size) {
+	if (io_size < vram->usable_size) {
 		if (bo_flags & XE_BO_NEEDS_CPU_ACCESS) {
 			place.fpfn = 0;
 			place.lpfn = io_size >> PAGE_SHIFT;
@@ -404,21 +418,21 @@ static int xe_ttm_io_mem_reserve(struct ttm_device *bdev,
 		return 0;
 	case XE_PL_VRAM0:
 	case XE_PL_VRAM1: {
-		struct xe_tile *tile = mem_type_to_tile(xe, mem->mem_type);
 		struct xe_ttm_vram_mgr_resource *vres =
 			to_xe_ttm_vram_mgr_resource(mem);
+		struct xe_mem_region *vram = res_to_mem_region(mem);
 
 		if (vres->used_visible_size < mem->size)
 			return -EINVAL;
 
 		mem->bus.offset = mem->start << PAGE_SHIFT;
 
-		if (tile->mem.vram.mapping &&
+		if (vram->mapping &&
 		    mem->placement & TTM_PL_FLAG_CONTIGUOUS)
-			mem->bus.addr = (u8 *)tile->mem.vram.mapping +
+			mem->bus.addr = (u8 *)vram->mapping +
 				mem->bus.offset;
 
-		mem->bus.offset += tile->mem.vram.io_start;
+		mem->bus.offset += vram->io_start;
 		mem->bus.is_iomem = true;
 
 #if !defined(CONFIG_X86)
@@ -614,7 +628,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	struct ttm_resource *old_mem = ttm_bo->resource;
 	u32 old_mem_type = old_mem ? old_mem->mem_type : XE_PL_SYSTEM;
 	struct ttm_tt *ttm = ttm_bo->ttm;
-	struct xe_tile *tile = NULL;
+	struct xe_migrate *migrate = NULL;
 	struct dma_fence *fence;
 	bool move_lacks_source;
 	bool tt_has_data;
@@ -692,14 +706,13 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 	}
 
 	if (bo->tile)
-		tile = bo->tile;
+		migrate = bo->tile->migrate;
 	else if (resource_is_vram(new_mem))
-		tile = mem_type_to_tile(xe, new_mem->mem_type);
+		migrate = mem_type_to_migrate(xe, new_mem->mem_type);
 	else if (mem_type_is_vram(old_mem_type))
-		tile = mem_type_to_tile(xe, old_mem_type);
+		migrate = mem_type_to_migrate(xe, old_mem_type);
 
-	xe_assert(xe, tile);
-	xe_tile_assert(tile, tile->migrate);
+	xe_assert(xe, migrate);
 
 	trace_xe_bo_move(bo);
 	xe_device_mem_access_get(xe);
@@ -720,7 +733,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		/* Create a new VMAP once kernel BO back in VRAM */
 		if (!ret && resource_is_vram(new_mem)) {
-			void *new_addr = tile->mem.vram.mapping +
+			struct xe_mem_region *vram = res_to_mem_region(new_mem);
+			void *new_addr = vram->mapping +
 				(new_mem->start << PAGE_SHIFT);
 
 			if (XE_WARN_ON(new_mem->start == XE_BO_INVALID_OFFSET)) {
@@ -737,9 +751,9 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
 		}
 	} else {
 		if (move_lacks_source)
-			fence = xe_migrate_clear(tile->migrate, bo, new_mem);
+			fence = xe_migrate_clear(migrate, bo, new_mem);
 		else
-			fence = xe_migrate_copy(tile->migrate,
+			fence = xe_migrate_copy(migrate,
 						bo, bo, old_mem, new_mem);
 		if (IS_ERR(fence)) {
 			ret = PTR_ERR(fence);
@@ -908,16 +922,16 @@ int xe_bo_restore_pinned(struct xe_bo *bo)
 static unsigned long xe_ttm_io_mem_pfn(struct ttm_buffer_object *ttm_bo,
 				       unsigned long page_offset)
 {
-	struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev);
 	struct xe_bo *bo = ttm_to_xe_bo(ttm_bo);
-	struct xe_tile *tile = mem_type_to_tile(xe, ttm_bo->resource->mem_type);
 	struct xe_res_cursor cursor;
+	struct xe_mem_region *vram;
 
 	if (ttm_bo->resource->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_io_offset(bo, page_offset << PAGE_SHIFT) >> PAGE_SHIFT;
 
+	vram = res_to_mem_region(ttm_bo->resource);
 	xe_res_first(ttm_bo->resource, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
-	return (tile->mem.vram.io_start + cursor.start) >> PAGE_SHIFT;
+	return (vram->io_start + cursor.start) >> PAGE_SHIFT;
 }
 
 static void __xe_bo_vunmap(struct xe_bo *bo);
@@ -1492,12 +1506,11 @@ struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile,
 uint64_t vram_region_gpu_offset(struct ttm_resource *res)
 {
 	struct xe_device *xe = ttm_to_xe_device(res->bo->bdev);
-	struct xe_tile *tile = mem_type_to_tile(xe, res->mem_type);
 
 	if (res->mem_type == XE_PL_STOLEN)
 		return xe_ttm_stolen_gpu_offset(xe);
 
-	return tile->mem.vram.dpa_base;
+	return res_to_mem_region(res)->dpa_base;
 }
 
 /**