Commit f79ee301 authored by Matt Roper; committed by Rodrigo Vivi

drm/xe: Add backpointer from gt to tile

Rather than a backpointer to the xe_device, a GT should have a
backpointer to its tile (which can then be used to lookup the device if
necessary).

The gt_to_xe() helper macro (which moves from xe_gt.h to xe_gt_types.h)
can and should still be used to jump directly from an xe_gt to
xe_device.

v2:
 - Fix kunit test build
 - Move a couple changes to the previous patch. (Lucas)
Reviewed-by: Matt Atwood <matthew.s.atwood@intel.com>
Reviewed-by: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://lore.kernel.org/r/20230601215244.678611-4-matthew.d.roper@intel.com
Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent a5edc7cd
......@@ -90,7 +90,7 @@ static int ccs_test_migrate(struct xe_gt *gt, struct xe_bo *bo,
}
/* Check last CCS value, or at least last value in page. */
offset = xe_device_ccs_bytes(gt->xe, bo->size);
offset = xe_device_ccs_bytes(gt_to_xe(gt), bo->size);
offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1;
if (cpu_map[offset] != get_val) {
KUNIT_FAIL(test,
......
......@@ -16,7 +16,7 @@
static int bb_prefetch(struct xe_gt *gt)
{
struct xe_device *xe = gt->xe;
struct xe_device *xe = gt_to_xe(gt);
if (GRAPHICS_VERx100(xe) >= 1250 && !xe_gt_is_media_type(gt))
/*
......
......@@ -142,14 +142,14 @@ static void xe_ggtt_initial_clear(struct xe_ggtt *ggtt)
u64 start, end;
/* Display may have allocated inside ggtt, so be careful with clearing here */
xe_device_mem_access_get(ggtt->gt->xe);
xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
drm_mm_for_each_hole(hole, &ggtt->mm, start, end)
xe_ggtt_clear(ggtt, start, end - start);
xe_ggtt_invalidate(ggtt->gt);
mutex_unlock(&ggtt->lock);
xe_device_mem_access_put(ggtt->gt->xe);
xe_device_mem_access_put(gt_to_xe(ggtt->gt));
}
int xe_ggtt_init(struct xe_gt *gt, struct xe_ggtt *ggtt)
......@@ -286,14 +286,14 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo,
if (err)
return err;
xe_device_mem_access_get(ggtt->gt->xe);
xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node, bo->size,
alignment, 0, start, end, 0);
if (!err)
xe_ggtt_map_bo(ggtt, bo);
mutex_unlock(&ggtt->lock);
xe_device_mem_access_put(ggtt->gt->xe);
xe_device_mem_access_put(gt_to_xe(ggtt->gt));
return err;
}
......@@ -322,7 +322,7 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
{
xe_device_mem_access_get(ggtt->gt->xe);
xe_device_mem_access_get(gt_to_xe(ggtt->gt));
mutex_lock(&ggtt->lock);
xe_ggtt_clear(ggtt, node->start, node->size);
......@@ -332,7 +332,7 @@ void xe_ggtt_remove_node(struct xe_ggtt *ggtt, struct drm_mm_node *node)
xe_ggtt_invalidate(ggtt->gt);
mutex_unlock(&ggtt->lock);
xe_device_mem_access_put(ggtt->gt->xe);
xe_device_mem_access_put(gt_to_xe(ggtt->gt));
}
void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo)
......
......@@ -49,11 +49,6 @@ static inline bool xe_gt_is_media_type(struct xe_gt *gt)
return gt->info.type == XE_GT_TYPE_MEDIA;
}
#define gt_to_xe(gt__) \
_Generic(gt__, \
const struct xe_gt *: (const struct xe_device *)((gt__)->xe), \
struct xe_gt *: (gt__)->xe)
static inline bool xe_gt_is_usm_hwe(struct xe_gt *gt, struct xe_hw_engine *hwe)
{
struct xe_device *xe = gt_to_xe(gt);
......
......@@ -11,7 +11,7 @@
#include "xe_device_types.h"
#define xe_gt_printk(_gt, _level, _fmt, ...) \
drm_##_level(&(_gt)->xe->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
drm_##_level(&gt_to_xe(_gt)->drm, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_err(_gt, _fmt, ...) \
xe_gt_printk((_gt), err, _fmt, ##__VA_ARGS__)
......@@ -32,10 +32,10 @@
xe_gt_printk((_gt), err_ratelimited, _fmt, ##__VA_ARGS__)
#define xe_gt_WARN(_gt, _condition, _fmt, ...) \
drm_WARN(&(_gt)->xe->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
drm_WARN(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_WARN_ONCE(_gt, _condition, _fmt, ...) \
drm_WARN_ONCE(&(_gt)->xe->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
drm_WARN_ONCE(&gt_to_xe(_gt)->drm, _condition, "GT%u: " _fmt, (_gt)->info.id, ##__VA_ARGS__)
#define xe_gt_WARN_ON(_gt, _condition) \
xe_gt_WARN((_gt), _condition, "%s(%s)", "gt_WARN_ON", __stringify(_condition))
......
......@@ -248,9 +248,9 @@ int xe_gt_tlb_invalidation_vma(struct xe_gt *gt,
XE_BUG_ON(len > MAX_TLB_INVALIDATION_LEN);
xe_device_mem_access_get(gt->xe);
xe_device_mem_access_get(xe);
ret = send_tlb_invalidation(&gt->uc.guc, fence, action, len);
xe_device_mem_access_put(gt->xe);
xe_device_mem_access_put(xe);
return ret;
}
......@@ -328,8 +328,8 @@ int xe_guc_tlb_invalidation_done_handler(struct xe_guc *guc, u32 *msg, u32 len)
TLB_INVALIDATION_SEQNO_MAX;
if (!expected_seqno)
expected_seqno = 1;
if (drm_WARN_ON(&gt->xe->drm, expected_seqno != msg[0])) {
drm_err(&gt->xe->drm, "TLB expected_seqno(%d) != msg(%u)\n",
if (drm_WARN_ON(&gt_to_xe(gt)->drm, expected_seqno != msg[0])) {
drm_err(&gt_to_xe(gt)->drm, "TLB expected_seqno(%d) != msg(%u)\n",
expected_seqno, msg[0]);
}
......
......@@ -76,6 +76,16 @@ enum xe_steering_type {
NUM_STEERING_TYPES
};
#define gt_to_tile(gt__) \
_Generic(gt__, \
const struct xe_gt *: (const struct xe_tile *)((gt__)->tile), \
struct xe_gt *: (gt__)->tile)
#define gt_to_xe(gt__) \
_Generic(gt__, \
const struct xe_gt *: (const struct xe_device *)(gt_to_tile(gt__)->xe), \
struct xe_gt *: gt_to_tile(gt__)->xe)
/**
* struct xe_gt - A "Graphics Technology" unit of the GPU
*
......@@ -90,8 +100,8 @@ enum xe_steering_type {
* separate GTs within a tile.
*/
struct xe_gt {
/** @xe: backpointer to XE device */
struct xe_device *xe;
/** @tile: Backpointer to GT's tile */
struct xe_tile *tile;
/** @info: GT info */
struct {
......
......@@ -217,8 +217,8 @@ int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64
return err;
/* actual size */
if (unlikely(gt->xe->info.platform == XE_DG1)) {
*tile_size = pci_resource_len(to_pci_dev(gt->xe->drm.dev), GEN12_LMEM_BAR);
if (unlikely(gt_to_xe(gt)->info.platform == XE_DG1)) {
*tile_size = pci_resource_len(to_pci_dev(gt_to_xe(gt)->drm.dev), GEN12_LMEM_BAR);
*tile_offset = 0;
} else {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE_ADDR_RANGE(gt->info.id));
......@@ -227,7 +227,7 @@ int xe_mmio_tile_vram_size(struct xe_gt *gt, u64 *vram_size, u64 *tile_size, u64
}
/* minus device usage */
if (gt->xe->info.has_flat_ccs) {
if (gt_to_xe(gt)->info.has_flat_ccs) {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
offset = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K;
} else {
......
......@@ -472,7 +472,7 @@ static void __init_mocs_table(struct xe_gt *gt,
unsigned int i;
u32 mocs;
mocs_dbg(&gt->xe->drm, "entries:%d\n", info->n_entries);
mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
drm_WARN_ONCE(&xe->drm, !info->unused_entries_index,
"Unused entries index should have been defined\n");
for (i = 0;
......@@ -480,7 +480,7 @@ static void __init_mocs_table(struct xe_gt *gt,
i++) {
struct xe_reg reg = XE_REG(addr + i * 4);
mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, reg.addr, mocs);
mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x\n", i, reg.addr, mocs);
xe_mmio_write32(gt, reg, mocs);
}
}
......@@ -509,13 +509,13 @@ static void init_l3cc_table(struct xe_gt *gt,
unsigned int i;
u32 l3cc;
mocs_dbg(&gt->xe->drm, "entries:%d\n", info->n_entries);
mocs_dbg(&gt_to_xe(gt)->drm, "entries:%d\n", info->n_entries);
for (i = 0;
i < (info->n_entries + 1) / 2 ?
(l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i),
get_entry_l3cc(info, 2 * i + 1))), 1 : 0;
i++) {
mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).addr,
mocs_dbg(&gt_to_xe(gt)->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).addr,
l3cc);
xe_mmio_write32(gt, LNCFCMOCS(i), l3cc);
}
......@@ -525,7 +525,7 @@ void xe_mocs_init_early(struct xe_gt *gt)
{
struct xe_mocs_info table;
get_mocs_settings(gt->xe, &table);
get_mocs_settings(gt_to_xe(gt), &table);
gt->mocs.uc_index = table.uc_index;
gt->mocs.wb_index = table.wb_index;
}
......@@ -538,8 +538,8 @@ void xe_mocs_init(struct xe_gt *gt)
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
flags = get_mocs_settings(gt->xe, &table);
mocs_dbg(&gt->xe->drm, "flag:0x%x\n", flags);
flags = get_mocs_settings(gt_to_xe(gt), &table);
mocs_dbg(&gt_to_xe(gt)->drm, "flag:0x%x\n", flags);
if (flags & HAS_GLOBAL_MOCS)
__init_mocs_table(gt, &table, GLOBAL_MOCS(0).addr);
......
......@@ -544,7 +544,7 @@ static int xe_info_init(struct xe_device *xe,
gt = &tile->primary_gt;
gt->info.id = id;
gt->xe = xe;
gt->tile = tile;
if (id == 0) {
gt->info.type = XE_GT_TYPE_MAIN;
......
......@@ -696,7 +696,7 @@ xe_pt_stage_bind_entry(struct xe_ptw *parent, pgoff_t offset,
* TODO: Suballocate the pt bo to avoid wasting a lot of
* memory.
*/
if (GRAPHICS_VERx100(xe_walk->gt->xe) >= 1250 && level == 1 &&
if (GRAPHICS_VERx100(gt_to_xe(xe_walk->gt)) >= 1250 && level == 1 &&
covers && xe_pt_scan_64K(addr, next, xe_walk)) {
walk->shifts = xe_compact_pt_shifts;
flags |= XE_PDE_64K;
......
......@@ -53,20 +53,20 @@ bool xe_ttm_stolen_cpu_access_needs_ggtt(struct xe_device *xe)
static s64 detect_bar2_dgfx(struct xe_gt *gt, struct xe_ttm_stolen_mgr *mgr)
{
struct pci_dev *pdev = to_pci_dev(gt->xe->drm.dev);
struct pci_dev *pdev = to_pci_dev(gt_to_xe(gt)->drm.dev);
u64 stolen_size;
u64 tile_offset;
u64 tile_size;
u64 vram_size;
if (xe_mmio_tile_vram_size(gt, &vram_size, &tile_size, &tile_offset)) {
drm_err(&gt->xe->drm, "Querying total vram size failed\n");
drm_err(&gt_to_xe(gt)->drm, "Querying total vram size failed\n");
return 0;
}
/* Use DSM base address instead for stolen memory */
mgr->stolen_base = (xe_mmio_read64(gt, DSMBASE) & BDSM_MASK) - tile_offset;
if (drm_WARN_ON(&gt->xe->drm, tile_size < mgr->stolen_base))
if (drm_WARN_ON(&gt_to_xe(gt)->drm, tile_size < mgr->stolen_base))
return 0;
stolen_size = tile_size - mgr->stolen_base;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment