Commit ce8bf5bd authored by Lucas De Marchi, committed by Rodrigo Vivi

drm/xe/mmio: Use struct xe_reg

Convert all the callers of the xe_mmio_*() helpers to pass a struct xe_reg
instead of a plain u32 offset. In a few places this also comes with a
rename, s/reg/reg_val/, for the variable holding the value read back, so it
doesn't get mixed up with the register address.
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://lore.kernel.org/r/20230508225322.2692066-2-lucas.demarchi@intel.com
Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
parent 34f89ac8
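
For readers following along, the shape of the change is roughly the following. This is a minimal sketch, assuming a layout like the one in regs/xe_reg_defs.h: the real struct xe_reg carries more metadata (the masked flag shows up below in guc_mmio_regset_write_one), the real XE_REG() takes extra optional arguments, and do_gt_reset_sketch() is a hypothetical reduction of do_gt_reset() from one of the hunks below.

struct xe_reg {
	u32 reg;	/* register offset */
	u32 masked:1;	/* register uses masked (upper-16-bit enable) writes */
};

#define XE_REG(r_)	((const struct xe_reg){ .reg = (r_) })

static int do_gt_reset_sketch(struct xe_gt *gt)
{
	/*
	 * Before this commit, call sites passed the raw offset, e.g.
	 * xe_mmio_write32(gt, GDRST.reg, GRDOM_FULL), so a register
	 * address and a register value were both plain u32 and easy to
	 * swap. With the typed argument that mix-up no longer compiles.
	 */
	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
	return xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000, NULL, false);
}
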
@@ -345,7 +345,7 @@ void xe_device_wmb(struct xe_device *xe)
 	wmb();
 
 	if (IS_DGFX(xe))
-		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33.reg, 0);
+		xe_mmio_write32(gt, SOFTWARE_FLAGS_SPR33, 0);
 }
 
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
...
@@ -60,7 +60,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
 	}
 
 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
-		xe_mmio_write32(hwe->gt, RCU_MODE.reg,
+		xe_mmio_write32(hwe->gt, RCU_MODE,
 				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
 
 	xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
@@ -78,17 +78,17 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
 	 */
 	wmb();
 
-	xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base).reg,
+	xe_mmio_write32(gt, RING_HWS_PGA(hwe->mmio_base),
 			xe_bo_ggtt_addr(hwe->hwsp));
-	xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base).reg);
-	xe_mmio_write32(gt, RING_MODE(hwe->mmio_base).reg,
+	xe_mmio_read32(gt, RING_HWS_PGA(hwe->mmio_base));
+	xe_mmio_write32(gt, RING_MODE(hwe->mmio_base),
 			_MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
 
-	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base).reg,
+	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
 			lower_32_bits(lrc_desc));
-	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base).reg,
+	xe_mmio_write32(gt, RING_EXECLIST_SQ_CONTENTS_HI(hwe->mmio_base),
 			upper_32_bits(lrc_desc));
-	xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base).reg,
+	xe_mmio_write32(gt, RING_EXECLIST_CONTROL(hwe->mmio_base),
 			EL_CTRL_LOAD);
 }
@@ -173,8 +173,8 @@ static u64 read_execlist_status(struct xe_hw_engine *hwe)
 	struct xe_gt *gt = hwe->gt;
 	u32 hi, lo;
 
-	lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base).reg);
-	hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base).reg);
+	lo = xe_mmio_read32(gt, RING_EXECLIST_STATUS_LO(hwe->mmio_base));
+	hi = xe_mmio_read32(gt, RING_EXECLIST_STATUS_HI(hwe->mmio_base));
 
 	printk(KERN_INFO "EXECLIST_STATUS %d:%d = 0x%08x %08x\n", hwe->class,
 	       hwe->instance, hi, lo);
...
@@ -8,6 +8,7 @@
 #include <drm/drm_util.h>
 
 #include "regs/xe_gt_regs.h"
+#include "regs/xe_reg_defs.h"
 #include "xe_gt.h"
 #include "xe_mmio.h"
@@ -27,7 +28,7 @@ fw_to_xe(struct xe_force_wake *fw)
 static void domain_init(struct xe_force_wake_domain *domain,
 			enum xe_force_wake_domain_id id,
-			u32 reg, u32 ack, u32 val, u32 mask)
+			struct xe_reg reg, struct xe_reg ack, u32 val, u32 mask)
 {
 	domain->id = id;
 	domain->reg_ctl = reg;
@@ -49,14 +50,14 @@ void xe_force_wake_init_gt(struct xe_gt *gt, struct xe_force_wake *fw)
 	if (xe->info.graphics_verx100 >= 1270) {
 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
 			    XE_FW_DOMAIN_ID_GT,
-			    FORCEWAKE_GT.reg,
-			    FORCEWAKE_ACK_GT_MTL.reg,
+			    FORCEWAKE_GT,
+			    FORCEWAKE_ACK_GT_MTL,
 			    BIT(0), BIT(16));
 	} else {
 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_GT],
 			    XE_FW_DOMAIN_ID_GT,
-			    FORCEWAKE_GT.reg,
-			    FORCEWAKE_ACK_GT.reg,
+			    FORCEWAKE_GT,
+			    FORCEWAKE_ACK_GT,
 			    BIT(0), BIT(16));
 	}
 }
@@ -71,8 +72,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
 	if (!xe_gt_is_media_type(gt))
 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_RENDER],
 			    XE_FW_DOMAIN_ID_RENDER,
-			    FORCEWAKE_RENDER.reg,
-			    FORCEWAKE_ACK_RENDER.reg,
+			    FORCEWAKE_RENDER,
+			    FORCEWAKE_ACK_RENDER,
 			    BIT(0), BIT(16));
 
 	for (i = XE_HW_ENGINE_VCS0, j = 0; i <= XE_HW_ENGINE_VCS7; ++i, ++j) {
@@ -81,8 +82,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j],
 			    XE_FW_DOMAIN_ID_MEDIA_VDBOX0 + j,
-			    FORCEWAKE_MEDIA_VDBOX(j).reg,
-			    FORCEWAKE_ACK_MEDIA_VDBOX(j).reg,
+			    FORCEWAKE_MEDIA_VDBOX(j),
+			    FORCEWAKE_ACK_MEDIA_VDBOX(j),
 			    BIT(0), BIT(16));
 	}
@@ -92,8 +93,8 @@ void xe_force_wake_init_engines(struct xe_gt *gt, struct xe_force_wake *fw)
 		domain_init(&fw->domains[XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j],
 			    XE_FW_DOMAIN_ID_MEDIA_VEBOX0 + j,
-			    FORCEWAKE_MEDIA_VEBOX(j).reg,
-			    FORCEWAKE_ACK_MEDIA_VEBOX(j).reg,
+			    FORCEWAKE_MEDIA_VEBOX(j),
+			    FORCEWAKE_ACK_MEDIA_VEBOX(j),
 			    BIT(0), BIT(16));
 	}
 }
@@ -128,7 +129,7 @@ static int domain_sleep_wait(struct xe_gt *gt,
 	for (tmp__ = (mask__); tmp__; tmp__ &= ~BIT(ffs(tmp__) - 1)) \
 		for_each_if((domain__ = ((fw__)->domains + \
 					 (ffs(tmp__) - 1))) && \
-			    domain__->reg_ctl)
+			    domain__->reg_ctl.reg)
 
 int xe_force_wake_get(struct xe_force_wake *fw,
 		      enum xe_force_wake_domains domains)
...
@@ -9,6 +9,8 @@
 #include <linux/mutex.h>
 #include <linux/types.h>
 
+#include "regs/xe_reg_defs.h"
+
 enum xe_force_wake_domain_id {
 	XE_FW_DOMAIN_ID_GT = 0,
 	XE_FW_DOMAIN_ID_RENDER,
@@ -56,9 +58,9 @@ struct xe_force_wake_domain {
 	/** @id: domain force wake id */
 	enum xe_force_wake_domain_id id;
 	/** @reg_ctl: domain wake control register address */
-	u32 reg_ctl;
+	struct xe_reg reg_ctl;
 	/** @reg_ack: domain ack register address */
-	u32 reg_ack;
+	struct xe_reg reg_ack;
 	/** @val: domain wake write value */
 	u32 val;
 	/** @mask: domain mask */
...
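
One consequence of the type change is visible in the domain iterator in xe_force_wake.c above: a struct cannot be tested for truth, so the "is this domain present?" check becomes an explicit test of the embedded offset. A sketch of the idiom, with a hypothetical helper name (domain_is_initialized() does not exist in the driver):

static bool domain_is_initialized(struct xe_force_wake_domain *domain)
{
	/* a zero-filled xe_reg (offset 0) marks an unused domain slot */
	return domain->reg_ctl.reg;
}
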
@@ -207,12 +207,12 @@ void xe_ggtt_invalidate(struct xe_gt *gt)
 		struct xe_device *xe = gt_to_xe(gt);
 
 		if (xe->info.platform == XE_PVC) {
-			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1.reg,
+			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC1,
 					PVC_GUC_TLB_INV_DESC1_INVALIDATE);
-			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0.reg,
+			xe_mmio_write32(gt, PVC_GUC_TLB_INV_DESC0,
 					PVC_GUC_TLB_INV_DESC0_VALID);
 		} else
-			xe_mmio_write32(gt, GUC_TLB_INV_CR.reg,
+			xe_mmio_write32(gt, GUC_TLB_INV_CR,
 					GUC_TLB_INV_CR_INVALIDATE);
 	}
 }
...
@@ -544,8 +544,8 @@ static int do_gt_reset(struct xe_gt *gt)
 	struct xe_device *xe = gt_to_xe(gt);
 	int err;
 
-	xe_mmio_write32(gt, GDRST.reg, GRDOM_FULL);
-	err = xe_mmio_wait32(gt, GDRST.reg, 0, GRDOM_FULL, 5000,
+	xe_mmio_write32(gt, GDRST, GRDOM_FULL);
+	err = xe_mmio_wait32(gt, GDRST, 0, GRDOM_FULL, 5000,
 			     NULL, false);
 	if (err)
 		drm_err(&xe->drm,
...
@@ -14,7 +14,7 @@
 
 static u32 read_reference_ts_freq(struct xe_gt *gt)
 {
-	u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE.reg);
+	u32 ts_override = xe_mmio_read32(gt, TIMESTAMP_OVERRIDE);
 	u32 base_freq, frac_freq;
 
 	base_freq = REG_FIELD_GET(TIMESTAMP_OVERRIDE_US_COUNTER_DIVIDER_MASK,
@@ -54,7 +54,7 @@ static u32 get_crystal_clock_freq(u32 rpm_config_reg)
 
 int xe_gt_clock_init(struct xe_gt *gt)
 {
-	u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE.reg);
+	u32 ctc_reg = xe_mmio_read32(gt, CTC_MODE);
 	u32 freq = 0;
 
 	/* Assuming gen11+ so assert this assumption is correct */
@@ -63,7 +63,7 @@ int xe_gt_clock_init(struct xe_gt *gt)
 	if (ctc_reg & CTC_SOURCE_DIVIDE_LOGIC) {
 		freq = read_reference_ts_freq(gt);
 	} else {
-		u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0.reg);
+		u32 c0 = xe_mmio_read32(gt, RPM_CONFIG0);
 
 		freq = get_crystal_clock_freq(c0);
...
@@ -40,6 +40,8 @@
  * non-terminated instance.
  */
 
+#define STEER_SEMAPHORE XE_REG(0xFD0)
+
 static inline struct xe_reg to_xe_reg(struct xe_reg_mcr reg_mcr)
 {
 	return reg_mcr.__reg;
@@ -183,9 +185,9 @@ static void init_steering_l3bank(struct xe_gt *gt)
 {
 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
 		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
-						xe_mmio_read32(gt, MIRROR_FUSE3.reg));
+						xe_mmio_read32(gt, MIRROR_FUSE3));
 		u32 bank_mask = REG_FIELD_GET(GT_L3_EXC_MASK,
-					      xe_mmio_read32(gt, XEHP_FUSE4.reg));
+					      xe_mmio_read32(gt, XEHP_FUSE4));
 
 		/*
 		 * Group selects mslice, instance selects bank within mslice.
@@ -196,7 +198,7 @@
 			bank_mask & BIT(0) ? 0 : 2;
 	} else if (gt_to_xe(gt)->info.platform == XE_DG2) {
 		u32 mslice_mask = REG_FIELD_GET(MEML3_EN_MASK,
-						xe_mmio_read32(gt, MIRROR_FUSE3.reg));
+						xe_mmio_read32(gt, MIRROR_FUSE3));
 		u32 bank = __ffs(mslice_mask) * 8;
 
 		/*
@@ -208,7 +210,7 @@
 		gt->steering[L3BANK].instance_target = bank & 0x3;
 	} else {
 		u32 fuse = REG_FIELD_GET(L3BANK_MASK,
-					 ~xe_mmio_read32(gt, MIRROR_FUSE3.reg));
+					 ~xe_mmio_read32(gt, MIRROR_FUSE3));
 
 		gt->steering[L3BANK].group_target = 0; /* unused */
 		gt->steering[L3BANK].instance_target = __ffs(fuse);
@@ -218,7 +220,7 @@
 static void init_steering_mslice(struct xe_gt *gt)
 {
 	u32 mask = REG_FIELD_GET(MEML3_EN_MASK,
-				 xe_mmio_read32(gt, MIRROR_FUSE3.reg));
+				 xe_mmio_read32(gt, MIRROR_FUSE3));
 
 	/*
 	 * mslice registers are valid (not terminated) if either the meml3
@@ -337,8 +339,8 @@ void xe_gt_mcr_set_implicit_defaults(struct xe_gt *gt)
 		u32 steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, 0) |
 			REG_FIELD_PREP(MCR_SUBSLICE_MASK, 2);
 
-		xe_mmio_write32(gt, MCFG_MCR_SELECTOR.reg, steer_val);
-		xe_mmio_write32(gt, SF_MCR_SELECTOR.reg, steer_val);
+		xe_mmio_write32(gt, MCFG_MCR_SELECTOR, steer_val);
+		xe_mmio_write32(gt, SF_MCR_SELECTOR, steer_val);
 		/*
 		 * For GAM registers, all reads should be directed to instance 1
 		 * (unicast reads against other instances are not allowed),
@@ -376,7 +378,7 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
 			continue;
 
 		for (int i = 0; gt->steering[type].ranges[i].end > 0; i++) {
-			if (xe_mmio_in_range(&gt->steering[type].ranges[i], reg.reg)) {
+			if (xe_mmio_in_range(&gt->steering[type].ranges[i], reg)) {
 				*group = gt->steering[type].group_target;
 				*instance = gt->steering[type].instance_target;
 				return true;
@@ -387,7 +389,7 @@
 	implicit_ranges = gt->steering[IMPLICIT_STEERING].ranges;
 	if (implicit_ranges)
 		for (int i = 0; implicit_ranges[i].end > 0; i++)
-			if (xe_mmio_in_range(&implicit_ranges[i], reg.reg))
+			if (xe_mmio_in_range(&implicit_ranges[i], reg))
 				return false;
 
 	/*
@@ -403,8 +405,6 @@ static bool xe_gt_mcr_get_nonterminated_steering(struct xe_gt *gt,
 	return true;
 }
 
-#define STEER_SEMAPHORE 0xFD0
-
 /*
  * Obtain exclusive access to MCR steering. On MTL and beyond we also need
  * to synchronize with external clients (e.g., firmware), so a semaphore
@@ -446,16 +446,17 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
 				u8 rw_flag, int group, int instance, u32 value)
 {
 	const struct xe_reg reg = to_xe_reg(reg_mcr);
-	u32 steer_reg, steer_val, val = 0;
+	struct xe_reg steer_reg;
+	u32 steer_val, val = 0;
 
 	lockdep_assert_held(&gt->mcr_lock);
 
 	if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1270) {
-		steer_reg = MTL_MCR_SELECTOR.reg;
+		steer_reg = MTL_MCR_SELECTOR;
 		steer_val = REG_FIELD_PREP(MTL_MCR_GROUPID, group) |
 			REG_FIELD_PREP(MTL_MCR_INSTANCEID, instance);
 	} else {
-		steer_reg = MCR_SELECTOR.reg;
+		steer_reg = MCR_SELECTOR;
 		steer_val = REG_FIELD_PREP(MCR_SLICE_MASK, group) |
 			REG_FIELD_PREP(MCR_SUBSLICE_MASK, instance);
 	}
@@ -480,9 +481,9 @@ static u32 rw_with_mcr_steering(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
 	xe_mmio_write32(gt, steer_reg, steer_val);
 
 	if (rw_flag == MCR_OP_READ)
-		val = xe_mmio_read32(gt, reg.reg);
+		val = xe_mmio_read32(gt, reg);
 	else
-		xe_mmio_write32(gt, reg.reg, value);
+		xe_mmio_write32(gt, reg, value);
 
 	/*
	 * If we turned off the multicast bit (during a write) we're required
@@ -524,7 +525,7 @@ u32 xe_gt_mcr_unicast_read_any(struct xe_gt *gt, struct xe_reg_mcr reg_mcr)
 					    group, instance, 0);
 		mcr_unlock(gt);
 	} else {
-		val = xe_mmio_read32(gt, reg.reg);
+		val = xe_mmio_read32(gt, reg);
 	}
 
 	return val;
@@ -591,7 +592,7 @@ void xe_gt_mcr_multicast_write(struct xe_gt *gt, struct xe_reg_mcr reg_mcr,
 	 * to touch the steering register.
 	 */
 	mcr_lock(gt);
-	xe_mmio_write32(gt, reg.reg, value);
+	xe_mmio_write32(gt, reg, value);
 	mcr_unlock(gt);
 }
...
@@ -26,7 +26,7 @@ load_dss_mask(struct xe_gt *gt, xe_dss_mask_t mask, int numregs, ...)
 	va_start(argp, numregs);
 	for (i = 0; i < numregs; i++)
-		fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, u32));
+		fuse_val[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
 	va_end(argp);
 
 	bitmap_from_arr32(mask, fuse_val, numregs * 32);
@@ -36,7 +36,7 @@ static void
 load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
 {
 	struct xe_device *xe = gt_to_xe(gt);
-	u32 reg = xe_mmio_read32(gt, XELP_EU_ENABLE.reg);
+	u32 reg_val = xe_mmio_read32(gt, XELP_EU_ENABLE);
 	u32 val = 0;
 	int i;
 
@@ -47,15 +47,15 @@ load_eu_mask(struct xe_gt *gt, xe_eu_mask_t mask)
 	 * of enable).
 	 */
 	if (GRAPHICS_VERx100(xe) < 1250)
-		reg = ~reg & XELP_EU_MASK;
+		reg_val = ~reg_val & XELP_EU_MASK;
 
 	/* On PVC, one bit = one EU */
 	if (GRAPHICS_VERx100(xe) == 1260) {
-		val = reg;
+		val = reg_val;
 	} else {
 		/* All other platforms, one bit = 2 EU */
-		for (i = 0; i < fls(reg); i++)
-			if (reg & BIT(i))
+		for (i = 0; i < fls(reg_val); i++)
+			if (reg_val & BIT(i))
 				val |= 0x3 << 2 * i;
 	}
@@ -95,10 +95,10 @@ xe_gt_topology_init(struct xe_gt *gt)
 	load_dss_mask(gt, gt->fuse_topo.g_dss_mask,
 		      num_geometry_regs,
-		      XELP_GT_GEOMETRY_DSS_ENABLE.reg);
+		      XELP_GT_GEOMETRY_DSS_ENABLE);
 	load_dss_mask(gt, gt->fuse_topo.c_dss_mask, num_compute_regs,
-		      XEHP_GT_COMPUTE_DSS_ENABLE.reg,
-		      XEHPC_GT_COMPUTE_DSS_ENABLE_EXT.reg);
+		      XEHP_GT_COMPUTE_DSS_ENABLE,
+		      XEHPC_GT_COMPUTE_DSS_ENABLE_EXT);
 	load_eu_mask(gt, gt->fuse_topo.eu_mask_per_dss);
 
 	xe_gt_topology_dump(gt, &p);
...
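
The load_dss_mask() change above also shows that struct xe_reg survives a trip through varargs: va_arg(argp, struct xe_reg) is valid C for a small struct, provided every caller now passes the struct itself (e.g. XELP_GT_GEOMETRY_DSS_ENABLE) rather than a u32 offset. A self-contained sketch of the pattern, assuming the usual stdarg declarations are in scope; read_fuse_regs() is a hypothetical reduction of load_dss_mask():

static void read_fuse_regs(struct xe_gt *gt, u32 *out, int numregs, ...)
{
	va_list argp;
	int i;

	va_start(argp, numregs);
	for (i = 0; i < numregs; i++)
		/* the vararg type must match exactly: struct xe_reg */
		out[i] = xe_mmio_read32(gt, va_arg(argp, struct xe_reg));
	va_end(argp);
}
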
@@ -232,10 +232,10 @@ static void guc_write_params(struct xe_guc *guc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	xe_mmio_write32(gt, SOFT_SCRATCH(0).reg, 0);
+	xe_mmio_write32(gt, SOFT_SCRATCH(0), 0);
 
 	for (i = 0; i < GUC_CTL_MAX_DWORDS; i++)
-		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i).reg, guc->params[i]);
+		xe_mmio_write32(gt, SOFT_SCRATCH(1 + i), guc->params[i]);
 }
 
 int xe_guc_init(struct xe_guc *guc)
@@ -268,9 +268,9 @@ int xe_guc_init(struct xe_guc *guc)
 	guc_init_params(guc);
 
 	if (xe_gt_is_media_type(gt))
-		guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT.reg;
+		guc->notify_reg = MEDIA_GUC_HOST_INTERRUPT;
 	else
-		guc->notify_reg = GUC_HOST_INTERRUPT.reg;
+		guc->notify_reg = GUC_HOST_INTERRUPT;
 
 	xe_uc_fw_change_status(&guc->fw, XE_UC_FIRMWARE_LOADABLE);
@@ -309,9 +309,9 @@ int xe_guc_reset(struct xe_guc *guc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	xe_mmio_write32(gt, GDRST.reg, GRDOM_GUC);
+	xe_mmio_write32(gt, GDRST, GRDOM_GUC);
 
-	ret = xe_mmio_wait32(gt, GDRST.reg, 0, GRDOM_GUC, 5000,
+	ret = xe_mmio_wait32(gt, GDRST, 0, GRDOM_GUC, 5000,
 			     &gdrst, false);
 	if (ret) {
 		drm_err(&xe->drm, "GuC reset timed out, GEN6_GDRST=0x%8x\n",
@@ -319,7 +319,7 @@ int xe_guc_reset(struct xe_guc *guc)
 		goto err_out;
 	}
 
-	guc_status = xe_mmio_read32(gt, GUC_STATUS.reg);
+	guc_status = xe_mmio_read32(gt, GUC_STATUS);
 	if (!(guc_status & GS_MIA_IN_RESET)) {
 		drm_err(&xe->drm,
 			"GuC status: 0x%x, MIA core expected to be in reset\n",
@@ -352,9 +352,9 @@ static void guc_prepare_xfer(struct xe_guc *guc)
 		shim_flags |= PVC_GUC_MOCS_INDEX(PVC_GUC_MOCS_UC_INDEX);
 
 	/* Must program this register before loading the ucode with DMA */
-	xe_mmio_write32(gt, GUC_SHIM_CONTROL.reg, shim_flags);
+	xe_mmio_write32(gt, GUC_SHIM_CONTROL, shim_flags);
 
-	xe_mmio_write32(gt, GT_PM_CONFIG.reg, GT_DOORBELL_ENABLE);
+	xe_mmio_write32(gt, GT_PM_CONFIG, GT_DOORBELL_ENABLE);
 }
 
 /*
@@ -370,7 +370,7 @@ static int guc_xfer_rsa(struct xe_guc *guc)
 	if (guc->fw.rsa_size > 256) {
 		u32 rsa_ggtt_addr = xe_bo_ggtt_addr(guc->fw.bo) +
 				    xe_uc_fw_rsa_offset(&guc->fw);
-		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0).reg, rsa_ggtt_addr);
+		xe_mmio_write32(gt, UOS_RSA_SCRATCH(0), rsa_ggtt_addr);
 		return 0;
 	}
@@ -379,7 +379,7 @@
 		return -ENOMEM;
 
 	for (i = 0; i < UOS_RSA_SCRATCH_COUNT; i++)
-		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i).reg, rsa[i]);
+		xe_mmio_write32(gt, UOS_RSA_SCRATCH(i), rsa[i]);
 
 	return 0;
 }
@@ -407,7 +407,7 @@ static int guc_wait_ucode(struct xe_guc *guc)
 	 * 200ms. Even at slowest clock, this should be sufficient. And
 	 * in the working case, a larger timeout makes no difference.
 	 */
-	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS.reg,
+	ret = xe_mmio_wait32(guc_to_gt(guc), GUC_STATUS,
 			     FIELD_PREP(GS_UKERNEL_MASK,
 					XE_GUC_LOAD_STATUS_READY),
 			     GS_UKERNEL_MASK, 200000, &status, false);
@@ -435,7 +435,7 @@ static int guc_wait_ucode(struct xe_guc *guc)
 		    XE_GUC_LOAD_STATUS_EXCEPTION) {
 			drm_info(drm, "GuC firmware exception. EIP: %#x\n",
 				 xe_mmio_read32(guc_to_gt(guc),
-						SOFT_SCRATCH(13).reg));
+						SOFT_SCRATCH(13)));
 			ret = -ENXIO;
 		}
@@ -532,10 +532,10 @@ static void guc_handle_mmio_msg(struct xe_guc *guc)
 
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15).reg);
+	msg = xe_mmio_read32(gt, SOFT_SCRATCH(15));
 	msg &= XE_GUC_RECV_MSG_EXCEPTION |
 		XE_GUC_RECV_MSG_CRASH_DUMP_POSTED;
-	xe_mmio_write32(gt, SOFT_SCRATCH(15).reg, 0);
+	xe_mmio_write32(gt, SOFT_SCRATCH(15), 0);
 
 	if (msg & XE_GUC_RECV_MSG_CRASH_DUMP_POSTED)
 		drm_err(&guc_to_xe(guc)->drm,
@@ -553,12 +553,12 @@ static void guc_enable_irq(struct xe_guc *guc)
 		REG_FIELD_PREP(ENGINE0_MASK, GUC_INTR_GUC2HOST) :
 		REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST);
 
-	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg,
+	xe_mmio_write32(gt, GUC_SG_INTR_ENABLE,
 			REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST));
 	if (xe_gt_is_media_type(gt))
-		xe_mmio_rmw32(gt, GUC_SG_INTR_MASK.reg, events, 0);
+		xe_mmio_rmw32(gt, GUC_SG_INTR_MASK, events, 0);
 	else
-		xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~events);
+		xe_mmio_write32(gt, GUC_SG_INTR_MASK, ~events);
 }
 
 int xe_guc_enable_communication(struct xe_guc *guc)
@@ -567,7 +567,7 @@ int xe_guc_enable_communication(struct xe_guc *guc)
 
 	guc_enable_irq(guc);
 
-	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK.reg,
+	xe_mmio_rmw32(guc_to_gt(guc), PMINTRMSK,
 		      ARAT_EXPIRED_INTRMSK, 0);
 
 	err = xe_guc_ct_enable(&guc->ct);
@@ -620,8 +620,8 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
 	struct xe_device *xe = guc_to_xe(guc);
 	struct xe_gt *gt = guc_to_gt(guc);
 	u32 header, reply;
-	u32 reply_reg = xe_gt_is_media_type(gt) ?
-		MED_VF_SW_FLAG(0).reg : VF_SW_FLAG(0).reg;
+	struct xe_reg reply_reg = xe_gt_is_media_type(gt) ?
+		MED_VF_SW_FLAG(0) : VF_SW_FLAG(0);
 	const u32 LAST_INDEX = VF_SW_FLAG_COUNT;
 	int ret;
 	int i;
@@ -641,14 +641,14 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
 	/* Not in critical data-path, just do if else for GT type */
 	if (xe_gt_is_media_type(gt)) {
 		for (i = 0; i < len; ++i)
-			xe_mmio_write32(gt, MED_VF_SW_FLAG(i).reg,
+			xe_mmio_write32(gt, MED_VF_SW_FLAG(i),
 					request[i]);
-		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX).reg);
+		xe_mmio_read32(gt, MED_VF_SW_FLAG(LAST_INDEX));
 	} else {
 		for (i = 0; i < len; ++i)
-			xe_mmio_write32(gt, VF_SW_FLAG(i).reg,
+			xe_mmio_write32(gt, VF_SW_FLAG(i),
 					request[i]);
-		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX).reg);
+		xe_mmio_read32(gt, VF_SW_FLAG(LAST_INDEX));
 	}
 
 	xe_guc_notify(guc);
@@ -712,9 +712,10 @@ int xe_guc_mmio_send_recv(struct xe_guc *guc, const u32 *request,
 	if (response_buf) {
 		response_buf[0] = header;
 
-		for (i = 1; i < VF_SW_FLAG_COUNT; i++)
-			response_buf[i] =
-				xe_mmio_read32(gt, reply_reg + i * sizeof(u32));
+		for (i = 1; i < VF_SW_FLAG_COUNT; i++) {
+			reply_reg.reg += i * sizeof(u32);
+			response_buf[i] = xe_mmio_read32(gt, reply_reg);
+		}
 	}
 
 	/* Use data from the GuC response as our return value */
@@ -836,7 +837,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 	if (err)
 		return;
 
-	status = xe_mmio_read32(gt, GUC_STATUS.reg);
+	status = xe_mmio_read32(gt, GUC_STATUS);
 
 	drm_printf(p, "\nGuC status 0x%08x:\n", status);
 	drm_printf(p, "\tBootrom status = 0x%x\n",
@@ -851,7 +852,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p)
 	drm_puts(p, "\nScratch registers:\n");
 	for (i = 0; i < SOFT_SCRATCH_COUNT; i++) {
 		drm_printf(p, "\t%2d: \t0x%x\n",
-			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i).reg));
+			   i, xe_mmio_read32(gt, SOFT_SCRATCH(i)));
 	}
 
 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
...
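
The xe_guc_mmio_send_recv() hunk above relies on struct xe_reg being passed and stored by value: reply_reg is a private copy, so bumping its .reg offset walks the scratch register file without touching the VF_SW_FLAG() definitions themselves. A hedged sketch of that idiom; read_sw_flag() is a hypothetical helper, not a driver function:

static u32 read_sw_flag(struct xe_gt *gt, int idx)
{
	struct xe_reg r = VF_SW_FLAG(0);	/* by-value copy */

	r.reg += idx * sizeof(u32);		/* step to the idx'th flag */
	return xe_mmio_read32(gt, r);
}
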
@@ -428,7 +428,6 @@ static void guc_mmio_regset_write_one(struct xe_guc_ads *ads,
 	struct guc_mmio_reg entry = {
 		.offset = reg.reg,
 		.flags = reg.masked ? GUC_REGSET_MASKED : 0,
-		/* TODO: steering */
 	};
 
 	xe_map_memcpy_to(ads_to_xe(ads), regset_map, n_entry * sizeof(entry),
@@ -551,7 +550,7 @@ static void guc_doorbell_init(struct xe_guc_ads *ads)
 	if (GRAPHICS_VER(xe) >= 12 && !IS_DGFX(xe)) {
 		u32 distdbreg =
-			xe_mmio_read32(gt, DIST_DBS_POPULATED.reg);
+			xe_mmio_read32(gt, DIST_DBS_POPULATED);
 
 		ads_blob_write(ads,
 			       system_info.generic_gt_sysinfo[GUC_GENERIC_GT_SYSINFO_DOORBELL_COUNT_PER_SQIDI],
...
@@ -317,9 +317,9 @@ static void mtl_update_rpe_value(struct xe_guc_pc *pc)
 	u32 reg;
 
 	if (xe_gt_is_media_type(gt))
-		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY.reg);
+		reg = xe_mmio_read32(gt, MTL_MPE_FREQUENCY);
 	else
-		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY.reg);
+		reg = xe_mmio_read32(gt, MTL_GT_RPE_FREQUENCY);
 
 	pc->rpe_freq = REG_FIELD_GET(MTL_RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
 }
@@ -336,9 +336,9 @@ static void tgl_update_rpe_value(struct xe_guc_pc *pc)
 	 * PCODE at a different register
 	 */
 	if (xe->info.platform == XE_PVC)
-		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP.reg);
+		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
 	else
-		reg = xe_mmio_read32(gt, GEN10_FREQ_INFO_REC.reg);
+		reg = xe_mmio_read32(gt, GEN10_FREQ_INFO_REC);
 
 	pc->rpe_freq = REG_FIELD_GET(RPE_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
 }
@@ -380,10 +380,10 @@ static ssize_t freq_act_show(struct device *dev,
 		goto out;
 
 	if (xe->info.platform == XE_METEORLAKE) {
-		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1.reg);
+		freq = xe_mmio_read32(gt, MTL_MIRROR_TARGET_WP1);
 		freq = REG_FIELD_GET(MTL_CAGF_MASK, freq);
 	} else {
-		freq = xe_mmio_read32(gt, GEN12_RPSTAT1.reg);
+		freq = xe_mmio_read32(gt, GEN12_RPSTAT1);
 		freq = REG_FIELD_GET(GEN12_CAGF_MASK, freq);
 	}
@@ -413,7 +413,7 @@ static ssize_t freq_cur_show(struct device *dev,
 	if (ret)
 		goto out;
 
-	freq = xe_mmio_read32(gt, RPNSWREQ.reg);
+	freq = xe_mmio_read32(gt, RPNSWREQ);
 
 	freq = REG_FIELD_GET(REQ_RATIO_MASK, freq);
 	ret = sysfs_emit(buf, "%d\n", decode_freq(freq));
@@ -588,7 +588,7 @@ static ssize_t rc_status_show(struct device *dev,
 	u32 reg;
 
 	xe_device_mem_access_get(gt_to_xe(gt));
-	reg = xe_mmio_read32(gt, GT_CORE_STATUS.reg);
+	reg = xe_mmio_read32(gt, GT_CORE_STATUS);
 	xe_device_mem_access_put(gt_to_xe(gt));
 
 	switch (REG_FIELD_GET(RCN_MASK, reg)) {
@@ -615,7 +615,7 @@ static ssize_t rc6_residency_show(struct device *dev,
 	if (ret)
 		goto out;
 
-	reg = xe_mmio_read32(gt, GT_GFX_RC6.reg);
+	reg = xe_mmio_read32(gt, GT_GFX_RC6);
 	ret = sysfs_emit(buff, "%u\n", reg);
 
 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
@@ -646,9 +646,9 @@ static void mtl_init_fused_rp_values(struct xe_guc_pc *pc)
 	xe_device_assert_mem_access(pc_to_xe(pc));
 
 	if (xe_gt_is_media_type(gt))
-		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP.reg);
+		reg = xe_mmio_read32(gt, MTL_MEDIAP_STATE_CAP);
 	else
-		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP.reg);
+		reg = xe_mmio_read32(gt, MTL_RP_STATE_CAP);
 	pc->rp0_freq = REG_FIELD_GET(MTL_RP0_CAP_MASK, reg) *
 		GT_FREQUENCY_MULTIPLIER;
 	pc->rpn_freq = REG_FIELD_GET(MTL_RPN_CAP_MASK, reg) *
@@ -664,9 +664,9 @@ static void tgl_init_fused_rp_values(struct xe_guc_pc *pc)
 	xe_device_assert_mem_access(pc_to_xe(pc));
 
 	if (xe->info.platform == XE_PVC)
-		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP.reg);
+		reg = xe_mmio_read32(gt, PVC_RP_STATE_CAP);
 	else
-		reg = xe_mmio_read32(gt, GEN6_RP_STATE_CAP.reg);
+		reg = xe_mmio_read32(gt, GEN6_RP_STATE_CAP);
 	pc->rp0_freq = REG_FIELD_GET(RP0_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
 	pc->rpn_freq = REG_FIELD_GET(RPN_MASK, reg) * GT_FREQUENCY_MULTIPLIER;
 }
@@ -745,9 +745,9 @@ static int pc_gucrc_disable(struct xe_guc_pc *pc)
 	if (ret)
 		return ret;
 
-	xe_mmio_write32(gt, PG_ENABLE.reg, 0);
-	xe_mmio_write32(gt, RC_CONTROL.reg, 0);
-	xe_mmio_write32(gt, RC_STATE.reg, 0);
+	xe_mmio_write32(gt, PG_ENABLE, 0);
+	xe_mmio_write32(gt, RC_CONTROL, 0);
+	xe_mmio_write32(gt, RC_STATE, 0);
 
 	XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
 
 	return 0;
...
@@ -9,6 +9,7 @@
 #include <linux/idr.h>
 #include <linux/xarray.h>
 
+#include "regs/xe_reg_defs.h"
 #include "xe_guc_ads_types.h"
 #include "xe_guc_ct_types.h"
 #include "xe_guc_fwif.h"
@@ -74,7 +75,7 @@ struct xe_guc {
 	/**
 	 * @notify_reg: Register which is written to notify GuC of H2G messages
 	 */
-	u32 notify_reg;
+	struct xe_reg notify_reg;
 	/** @params: Control params for fw initialization */
 	u32 params[GUC_CTL_MAX_DWORDS];
 };
...
@@ -84,7 +84,7 @@ int xe_huc_auth(struct xe_huc *huc)
 		goto fail;
 	}
 
-	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO.reg,
+	ret = xe_mmio_wait32(gt, HUC_KERNEL_LOAD_INFO,
 			     HUC_LOAD_SUCCESSFUL,
 			     HUC_LOAD_SUCCESSFUL, 100000, NULL, false);
 	if (ret) {
@@ -126,7 +126,7 @@ void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
 		return;
 
 	drm_printf(p, "\nHuC status: 0x%08x\n",
-		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO.reg));
+		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
 
 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
 }
@@ -233,20 +233,25 @@ static void hw_engine_fini(struct drm_device *drm, void *arg)
 	hwe->gt = NULL;
 }
 
-static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, u32 reg, u32 val)
+static void hw_engine_mmio_write32(struct xe_hw_engine *hwe, struct xe_reg reg,
+				   u32 val)
 {
-	XE_BUG_ON(reg & hwe->mmio_base);
+	XE_BUG_ON(reg.reg & hwe->mmio_base);
 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
 
-	xe_mmio_write32(hwe->gt, reg + hwe->mmio_base, val);
+	reg.reg += hwe->mmio_base;
+
+	xe_mmio_write32(hwe->gt, reg, val);
 }
 
-static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, u32 reg)
+static u32 hw_engine_mmio_read32(struct xe_hw_engine *hwe, struct xe_reg reg)
 {
-	XE_BUG_ON(reg & hwe->mmio_base);
+	XE_BUG_ON(reg.reg & hwe->mmio_base);
 	xe_force_wake_assert_held(gt_to_fw(hwe->gt), hwe->domain);
 
-	return xe_mmio_read32(hwe->gt, reg + hwe->mmio_base);
+	reg.reg += hwe->mmio_base;
+
+	return xe_mmio_read32(hwe->gt, reg);
 }
 
 void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
@@ -255,17 +260,17 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
 		xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
 
 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
-		xe_mmio_write32(hwe->gt, RCU_MODE.reg,
+		xe_mmio_write32(hwe->gt, RCU_MODE,
 				_MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
 
-	hw_engine_mmio_write32(hwe, RING_HWSTAM(0).reg, ~0x0);
-	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0).reg,
+	hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
+	hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
 			       xe_bo_ggtt_addr(hwe->hwsp));
-	hw_engine_mmio_write32(hwe, RING_MODE(0).reg,
+	hw_engine_mmio_write32(hwe, RING_MODE(0),
 			       _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE));
-	hw_engine_mmio_write32(hwe, RING_MI_MODE(0).reg,
+	hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
 			       _MASKED_BIT_DISABLE(STOP_RING));
-	hw_engine_mmio_read32(hwe, RING_MI_MODE(0).reg);
+	hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
 }
 
 void
@@ -443,7 +448,7 @@ static void read_media_fuses(struct xe_gt *gt)
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE.reg);
+	media_fuse = xe_mmio_read32(gt, GT_VEBOX_VDBOX_DISABLE);
 
 	/*
 	 * Pre-Xe_HP platforms had register bits representing absent engines,
@@ -485,7 +490,7 @@ static void read_copy_fuses(struct xe_gt *gt)
 	xe_force_wake_assert_held(gt_to_fw(gt), XE_FW_GT);
 
-	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3.reg);
+	bcs_mask = xe_mmio_read32(gt, MIRROR_FUSE3);
 	bcs_mask = REG_FIELD_GET(MEML3_EN_MASK, bcs_mask);
 
 	/* BCS0 is always present; only BCS1-BCS8 may be fused off */
@@ -582,63 +587,63 @@ void xe_hw_engine_print_state(struct xe_hw_engine *hwe, struct drm_printer *p)
 	drm_printf(p, "\tMMIO base: 0x%08x\n", hwe->mmio_base);
 
 	drm_printf(p, "\tHWSTAM: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_HWSTAM(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_HWSTAM(0)));
 	drm_printf(p, "\tRING_HWS_PGA: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_HWS_PGA(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_HWS_PGA(0)));
 
 	drm_printf(p, "\tRING_EXECLIST_STATUS_LO: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_LO(0)));
 	drm_printf(p, "\tRING_EXECLIST_STATUS_HI: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_EXECLIST_STATUS_HI(0)));
 	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_LO: 0x%08x\n",
 		   hw_engine_mmio_read32(hwe,
-					 RING_EXECLIST_SQ_CONTENTS_LO(0).reg));
+					 RING_EXECLIST_SQ_CONTENTS_LO(0)));
 	drm_printf(p, "\tRING_EXECLIST_SQ_CONTENTS_HI: 0x%08x\n",
 		   hw_engine_mmio_read32(hwe,
-					 RING_EXECLIST_SQ_CONTENTS_HI(0).reg));
+					 RING_EXECLIST_SQ_CONTENTS_HI(0)));
 	drm_printf(p, "\tRING_EXECLIST_CONTROL: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_EXECLIST_CONTROL(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_EXECLIST_CONTROL(0)));
 
 	drm_printf(p, "\tRING_START: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_START(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_START(0)));
 	drm_printf(p, "\tRING_HEAD: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_HEAD(0).reg) & HEAD_ADDR);
+		   hw_engine_mmio_read32(hwe, RING_HEAD(0)) & HEAD_ADDR);
 	drm_printf(p, "\tRING_TAIL: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_TAIL(0).reg) & TAIL_ADDR);
+		   hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR);
 	drm_printf(p, "\tRING_CTL: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_CTL(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_CTL(0)));
 	drm_printf(p, "\tRING_MODE: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_MI_MODE(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_MI_MODE(0)));
 	drm_printf(p, "\tRING_MODE_GEN7: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_MODE(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_MODE(0)));
 
 	drm_printf(p, "\tRING_IMR: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_IMR(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_IMR(0)));
 	drm_printf(p, "\tRING_ESR: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_ESR(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_ESR(0)));
 	drm_printf(p, "\tRING_EMR: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_EMR(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_EMR(0)));
 	drm_printf(p, "\tRING_EIR: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_EIR(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_EIR(0)));
 
 	drm_printf(p, "\tACTHD: 0x%08x_%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0).reg),
-		   hw_engine_mmio_read32(hwe, RING_ACTHD(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_ACTHD_UDW(0)),
+		   hw_engine_mmio_read32(hwe, RING_ACTHD(0)));
 	drm_printf(p, "\tBBADDR: 0x%08x_%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0).reg),
-		   hw_engine_mmio_read32(hwe, RING_BBADDR(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_BBADDR_UDW(0)),
+		   hw_engine_mmio_read32(hwe, RING_BBADDR(0)));
 	drm_printf(p, "\tDMA_FADDR: 0x%08x_%08x\n",
-		   hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0).reg),
-		   hw_engine_mmio_read32(hwe, RING_DMA_FADD(0).reg));
+		   hw_engine_mmio_read32(hwe, RING_DMA_FADD_UDW(0)),
+		   hw_engine_mmio_read32(hwe, RING_DMA_FADD(0)));
 
 	drm_printf(p, "\tIPEIR: 0x%08x\n",
-		   hw_engine_mmio_read32(hwe, IPEIR(0).reg));
+		   hw_engine_mmio_read32(hwe, IPEIR(0)));
 	drm_printf(p, "\tIPEHR: 0x%08x\n\n",
-		   hw_engine_mmio_read32(hwe, IPEHR(0).reg));
+		   hw_engine_mmio_read32(hwe, IPEHR(0)));
 
 	if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
 		drm_printf(p, "\tRCU_MODE: 0x%08x\n",
-			   xe_mmio_read32(hwe->gt, RCU_MODE.reg));
+			   xe_mmio_read32(hwe->gt, RCU_MODE));
 }
...
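
The hw_engine helpers above show the other offset pattern in this commit: per-engine ring registers are declared against a zero base (RING_TAIL(0) and friends), and the helper folds hwe->mmio_base into its local struct copy, with the XE_BUG_ON catching a register that already carries a base. A usage sketch; read_ring_tail() is a hypothetical caller, not a driver function:

static u32 read_ring_tail(struct xe_hw_engine *hwe)
{
	/* relative offset; hw_engine_mmio_read32() adds hwe->mmio_base */
	return hw_engine_mmio_read32(hwe, RING_TAIL(0)) & TAIL_ADDR;
}
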
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
static void assert_iir_is_zero(struct xe_gt *gt, struct xe_reg reg) static void assert_iir_is_zero(struct xe_gt *gt, struct xe_reg reg)
{ {
u32 val = xe_mmio_read32(gt, reg.reg); u32 val = xe_mmio_read32(gt, reg);
if (val == 0) if (val == 0)
return; return;
...@@ -36,10 +36,10 @@ static void assert_iir_is_zero(struct xe_gt *gt, struct xe_reg reg) ...@@ -36,10 +36,10 @@ static void assert_iir_is_zero(struct xe_gt *gt, struct xe_reg reg)
drm_WARN(&gt_to_xe(gt)->drm, 1, drm_WARN(&gt_to_xe(gt)->drm, 1,
"Interrupt register 0x%x is not zero: 0x%08x\n", "Interrupt register 0x%x is not zero: 0x%08x\n",
reg.reg, val); reg.reg, val);
xe_mmio_write32(gt, reg.reg, 0xffffffff); xe_mmio_write32(gt, reg, 0xffffffff);
xe_mmio_read32(gt, reg.reg); xe_mmio_read32(gt, reg);
xe_mmio_write32(gt, reg.reg, 0xffffffff); xe_mmio_write32(gt, reg, 0xffffffff);
xe_mmio_read32(gt, reg.reg); xe_mmio_read32(gt, reg);
} }
/* /*
...@@ -54,32 +54,32 @@ static void unmask_and_enable(struct xe_gt *gt, u32 irqregs, u32 bits) ...@@ -54,32 +54,32 @@ static void unmask_and_enable(struct xe_gt *gt, u32 irqregs, u32 bits)
*/ */
assert_iir_is_zero(gt, IIR(irqregs)); assert_iir_is_zero(gt, IIR(irqregs));
xe_mmio_write32(gt, IER(irqregs).reg, bits); xe_mmio_write32(gt, IER(irqregs), bits);
xe_mmio_write32(gt, IMR(irqregs).reg, ~bits); xe_mmio_write32(gt, IMR(irqregs), ~bits);
/* Posting read */ /* Posting read */
xe_mmio_read32(gt, IMR(irqregs).reg); xe_mmio_read32(gt, IMR(irqregs));
} }
/* Mask and disable all interrupts. */ /* Mask and disable all interrupts. */
static void mask_and_disable(struct xe_gt *gt, u32 irqregs) static void mask_and_disable(struct xe_gt *gt, u32 irqregs)
{ {
xe_mmio_write32(gt, IMR(irqregs).reg, ~0); xe_mmio_write32(gt, IMR(irqregs), ~0);
/* Posting read */ /* Posting read */
xe_mmio_read32(gt, IMR(irqregs).reg); xe_mmio_read32(gt, IMR(irqregs));
xe_mmio_write32(gt, IER(irqregs).reg, 0); xe_mmio_write32(gt, IER(irqregs), 0);
/* IIR can theoretically queue up two events. Be paranoid. */ /* IIR can theoretically queue up two events. Be paranoid. */
xe_mmio_write32(gt, IIR(irqregs).reg, ~0); xe_mmio_write32(gt, IIR(irqregs), ~0);
xe_mmio_read32(gt, IIR(irqregs).reg); xe_mmio_read32(gt, IIR(irqregs));
xe_mmio_write32(gt, IIR(irqregs).reg, ~0); xe_mmio_write32(gt, IIR(irqregs), ~0);
xe_mmio_read32(gt, IIR(irqregs).reg); xe_mmio_read32(gt, IIR(irqregs));
} }
static u32 xelp_intr_disable(struct xe_gt *gt) static u32 xelp_intr_disable(struct xe_gt *gt)
{ {
xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, 0); xe_mmio_write32(gt, GFX_MSTR_IRQ, 0);
/* /*
* Now with master disabled, get a sample of level indications * Now with master disabled, get a sample of level indications
...@@ -87,7 +87,7 @@ static u32 xelp_intr_disable(struct xe_gt *gt) ...@@ -87,7 +87,7 @@ static u32 xelp_intr_disable(struct xe_gt *gt)
* New indications can and will light up during processing, * New indications can and will light up during processing,
* and will generate new interrupt after enabling master. * and will generate new interrupt after enabling master.
*/ */
return xe_mmio_read32(gt, GFX_MSTR_IRQ.reg); return xe_mmio_read32(gt, GFX_MSTR_IRQ);
} }
static u32 static u32
...@@ -98,18 +98,18 @@ gu_misc_irq_ack(struct xe_gt *gt, const u32 master_ctl) ...@@ -98,18 +98,18 @@ gu_misc_irq_ack(struct xe_gt *gt, const u32 master_ctl)
if (!(master_ctl & GU_MISC_IRQ)) if (!(master_ctl & GU_MISC_IRQ))
return 0; return 0;
iir = xe_mmio_read32(gt, IIR(GU_MISC_IRQ_OFFSET).reg); iir = xe_mmio_read32(gt, IIR(GU_MISC_IRQ_OFFSET));
if (likely(iir)) if (likely(iir))
xe_mmio_write32(gt, IIR(GU_MISC_IRQ_OFFSET).reg, iir); xe_mmio_write32(gt, IIR(GU_MISC_IRQ_OFFSET), iir);
return iir; return iir;
} }
static inline void xelp_intr_enable(struct xe_gt *gt, bool stall) static inline void xelp_intr_enable(struct xe_gt *gt, bool stall)
{ {
xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, MASTER_IRQ); xe_mmio_write32(gt, GFX_MSTR_IRQ, MASTER_IRQ);
if (stall) if (stall)
xe_mmio_read32(gt, GFX_MSTR_IRQ.reg); xe_mmio_read32(gt, GFX_MSTR_IRQ);
} }
static void gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) static void gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
...@@ -132,41 +132,41 @@ static void gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) ...@@ -132,41 +132,41 @@ static void gt_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
smask = irqs << 16; smask = irqs << 16;
/* Enable RCS, BCS, VCS and VECS class interrupts. */ /* Enable RCS, BCS, VCS and VECS class interrupts. */
xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE.reg, dmask); xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, dmask);
xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE.reg, dmask); xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, dmask);
if (ccs_mask) if (ccs_mask)
xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE.reg, smask); xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, smask);
/* Unmask irqs on RCS, BCS, VCS and VECS engines. */ /* Unmask irqs on RCS, BCS, VCS and VECS engines. */
xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK.reg, ~smask); xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~smask);
xe_mmio_write32(gt, BCS_RSVD_INTR_MASK.reg, ~smask); xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~smask);
if (bcs_mask & (BIT(1)|BIT(2))) if (bcs_mask & (BIT(1)|BIT(2)))
xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(3)|BIT(4))) if (bcs_mask & (BIT(3)|BIT(4)))
xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(5)|BIT(6))) if (bcs_mask & (BIT(5)|BIT(6)))
xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~dmask);
if (bcs_mask & (BIT(7)|BIT(8))) if (bcs_mask & (BIT(7)|BIT(8)))
xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~dmask);
xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(0)|BIT(1))) if (ccs_mask & (BIT(0)|BIT(1)))
xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask);
if (ccs_mask & (BIT(2)|BIT(3))) if (ccs_mask & (BIT(2)|BIT(3)))
xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK.reg, ~dmask); xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask);
/* /*
* RPS interrupts will get enabled/disabled on demand when RPS itself * RPS interrupts will get enabled/disabled on demand when RPS itself
* is enabled/disabled. * is enabled/disabled.
*/ */
/* TODO: gt->pm_ier, gt->pm_imr */ /* TODO: gt->pm_ier, gt->pm_imr */
xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE, 0);
xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK.reg, ~0); xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK, ~0);
/* Same thing for GuC interrupts */ /* Same thing for GuC interrupts */
xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, GUC_SG_INTR_ENABLE, 0);
xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~0); xe_mmio_write32(gt, GUC_SG_INTR_MASK, ~0);
} }
static void xelp_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) static void xelp_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
...@@ -191,7 +191,7 @@ gt_engine_identity(struct xe_device *xe, ...@@ -191,7 +191,7 @@ gt_engine_identity(struct xe_device *xe,
lockdep_assert_held(&xe->irq.lock); lockdep_assert_held(&xe->irq.lock);
xe_mmio_write32(gt, IIR_REG_SELECTOR(bank).reg, BIT(bit)); xe_mmio_write32(gt, IIR_REG_SELECTOR(bank), BIT(bit));
/* /*
* NB: Specs do not specify how long to spin wait, * NB: Specs do not specify how long to spin wait,
...@@ -199,7 +199,7 @@ gt_engine_identity(struct xe_device *xe, ...@@ -199,7 +199,7 @@ gt_engine_identity(struct xe_device *xe,
*/ */
timeout_ts = (local_clock() >> 10) + 100; timeout_ts = (local_clock() >> 10) + 100;
do { do {
ident = xe_mmio_read32(gt, INTR_IDENTITY_REG(bank).reg); ident = xe_mmio_read32(gt, INTR_IDENTITY_REG(bank));
} while (!(ident & INTR_DATA_VALID) && } while (!(ident & INTR_DATA_VALID) &&
!time_after32(local_clock() >> 10, timeout_ts)); !time_after32(local_clock() >> 10, timeout_ts));
...@@ -209,7 +209,7 @@ gt_engine_identity(struct xe_device *xe, ...@@ -209,7 +209,7 @@ gt_engine_identity(struct xe_device *xe,
return 0; return 0;
} }
xe_mmio_write32(gt, INTR_IDENTITY_REG(bank).reg, INTR_DATA_VALID); xe_mmio_write32(gt, INTR_IDENTITY_REG(bank), INTR_DATA_VALID);
return ident; return ident;
} }
...@@ -248,11 +248,11 @@ static void gt_irq_handler(struct xe_device *xe, struct xe_gt *gt, ...@@ -248,11 +248,11 @@ static void gt_irq_handler(struct xe_device *xe, struct xe_gt *gt,
if (!xe_gt_is_media_type(gt)) { if (!xe_gt_is_media_type(gt)) {
intr_dw[bank] = intr_dw[bank] =
xe_mmio_read32(gt, GT_INTR_DW(bank).reg); xe_mmio_read32(gt, GT_INTR_DW(bank));
for_each_set_bit(bit, intr_dw + bank, 32) for_each_set_bit(bit, intr_dw + bank, 32)
identity[bit] = gt_engine_identity(xe, gt, identity[bit] = gt_engine_identity(xe, gt,
bank, bit); bank, bit);
xe_mmio_write32(gt, GT_INTR_DW(bank).reg, xe_mmio_write32(gt, GT_INTR_DW(bank),
intr_dw[bank]); intr_dw[bank]);
} }
...@@ -310,14 +310,14 @@ static u32 dg1_intr_disable(struct xe_device *xe) ...@@ -310,14 +310,14 @@ static u32 dg1_intr_disable(struct xe_device *xe)
u32 val; u32 val;
/* First disable interrupts */ /* First disable interrupts */
xe_mmio_write32(gt, DG1_MSTR_TILE_INTR.reg, 0); xe_mmio_write32(gt, DG1_MSTR_TILE_INTR, 0);
/* Get the indication levels and ack the master unit */ /* Get the indication levels and ack the master unit */
val = xe_mmio_read32(gt, DG1_MSTR_TILE_INTR.reg); val = xe_mmio_read32(gt, DG1_MSTR_TILE_INTR);
if (unlikely(!val)) if (unlikely(!val))
return 0; return 0;
xe_mmio_write32(gt, DG1_MSTR_TILE_INTR.reg, val); xe_mmio_write32(gt, DG1_MSTR_TILE_INTR, val);
return val; return val;
} }
...@@ -326,9 +326,9 @@ static void dg1_intr_enable(struct xe_device *xe, bool stall) ...@@ -326,9 +326,9 @@ static void dg1_intr_enable(struct xe_device *xe, bool stall)
{ {
struct xe_gt *gt = xe_device_get_gt(xe, 0); struct xe_gt *gt = xe_device_get_gt(xe, 0);
xe_mmio_write32(gt, DG1_MSTR_TILE_INTR.reg, DG1_MSTR_IRQ); xe_mmio_write32(gt, DG1_MSTR_TILE_INTR, DG1_MSTR_IRQ);
if (stall) if (stall)
xe_mmio_read32(gt, DG1_MSTR_TILE_INTR.reg); xe_mmio_read32(gt, DG1_MSTR_TILE_INTR);
} }
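
Note: the conditional read-back in dg1_intr_enable() is a posting read: MMIO writes can sit in a write buffer, and a read from the same device forces them to complete before the caller proceeds. The idiom in isolation (write_posted() is an illustrative helper, not in the driver):

        static void write_posted(struct xe_gt *gt, struct xe_reg reg, u32 val)
        {
                xe_mmio_write32(gt, reg, val);
                xe_mmio_read32(gt, reg);        /* posting read: flush the write */
        }
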
static void dg1_irq_postinstall(struct xe_device *xe, struct xe_gt *gt) static void dg1_irq_postinstall(struct xe_device *xe, struct xe_gt *gt)
...@@ -368,7 +368,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) ...@@ -368,7 +368,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
continue; continue;
if (!xe_gt_is_media_type(gt)) if (!xe_gt_is_media_type(gt))
master_ctl = xe_mmio_read32(gt, GFX_MSTR_IRQ.reg); master_ctl = xe_mmio_read32(gt, GFX_MSTR_IRQ);
/* /*
* We might be in irq handler just when PCIe DPC is initiated * We might be in irq handler just when PCIe DPC is initiated
...@@ -382,7 +382,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg) ...@@ -382,7 +382,7 @@ static irqreturn_t dg1_irq_handler(int irq, void *arg)
} }
if (!xe_gt_is_media_type(gt)) if (!xe_gt_is_media_type(gt))
xe_mmio_write32(gt, GFX_MSTR_IRQ.reg, master_ctl); xe_mmio_write32(gt, GFX_MSTR_IRQ, master_ctl);
gt_irq_handler(xe, gt, master_ctl, intr_dw, identity); gt_irq_handler(xe, gt, master_ctl, intr_dw, identity);
/* /*
...@@ -407,34 +407,34 @@ static void gt_irq_reset(struct xe_gt *gt) ...@@ -407,34 +407,34 @@ static void gt_irq_reset(struct xe_gt *gt)
u32 bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY); u32 bcs_mask = xe_hw_engine_mask_per_class(gt, XE_ENGINE_CLASS_COPY);
/* Disable RCS, BCS, VCS and VECS class engines. */ /* Disable RCS, BCS, VCS and VECS class engines. */
xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, RENDER_COPY_INTR_ENABLE, 0);
xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, VCS_VECS_INTR_ENABLE, 0);
if (ccs_mask) if (ccs_mask)
xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, CCS_RSVD_INTR_ENABLE, 0);
/* Restore irq masks on RCS, BCS, VCS and VECS engines. */ /* Restore irq masks on RCS, BCS, VCS and VECS engines. */
xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK.reg, ~0); xe_mmio_write32(gt, RCS0_RSVD_INTR_MASK, ~0);
xe_mmio_write32(gt, BCS_RSVD_INTR_MASK.reg, ~0); xe_mmio_write32(gt, BCS_RSVD_INTR_MASK, ~0);
if (bcs_mask & (BIT(1)|BIT(2))) if (bcs_mask & (BIT(1)|BIT(2)))
xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK.reg, ~0); xe_mmio_write32(gt, XEHPC_BCS1_BCS2_INTR_MASK, ~0);
if (bcs_mask & (BIT(3)|BIT(4))) if (bcs_mask & (BIT(3)|BIT(4)))
xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK.reg, ~0); xe_mmio_write32(gt, XEHPC_BCS3_BCS4_INTR_MASK, ~0);
if (bcs_mask & (BIT(5)|BIT(6))) if (bcs_mask & (BIT(5)|BIT(6)))
xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK.reg, ~0); xe_mmio_write32(gt, XEHPC_BCS5_BCS6_INTR_MASK, ~0);
if (bcs_mask & (BIT(7)|BIT(8))) if (bcs_mask & (BIT(7)|BIT(8)))
xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK.reg, ~0); xe_mmio_write32(gt, XEHPC_BCS7_BCS8_INTR_MASK, ~0);
xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK.reg, ~0); xe_mmio_write32(gt, VCS0_VCS1_INTR_MASK, ~0);
xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK.reg, ~0); xe_mmio_write32(gt, VCS2_VCS3_INTR_MASK, ~0);
xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK.reg, ~0); xe_mmio_write32(gt, VECS0_VECS1_INTR_MASK, ~0);
if (ccs_mask & (BIT(0)|BIT(1))) if (ccs_mask & (BIT(0)|BIT(1)))
xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK.reg, ~0); xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~0);
if (ccs_mask & (BIT(2)|BIT(3))) if (ccs_mask & (BIT(2)|BIT(3)))
xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK.reg, ~0); xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~0);
xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_ENABLE, 0);
xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK.reg, ~0); xe_mmio_write32(gt, GPM_WGBOXPERF_INTR_MASK, ~0);
xe_mmio_write32(gt, GUC_SG_INTR_ENABLE.reg, 0); xe_mmio_write32(gt, GUC_SG_INTR_ENABLE, 0);
xe_mmio_write32(gt, GUC_SG_INTR_MASK.reg, ~0); xe_mmio_write32(gt, GUC_SG_INTR_MASK, ~0);
} }
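
Note: each XEHPC_BCSn_BCSm_INTR_MASK register serves a pair of copy engines, which is why every write is gated on BIT(n)|BIT(n+1) of bcs_mask. An equivalent table-driven sketch (a hypothetical rewrite, not how the driver is structured):

        static void mask_bcs_pairs(struct xe_gt *gt, u32 bcs_mask)
        {
                const struct {
                        u32 engines;            /* instance bits the register serves */
                        struct xe_reg mask_reg;
                } pairs[] = {
                        { BIT(1) | BIT(2), XEHPC_BCS1_BCS2_INTR_MASK },
                        { BIT(3) | BIT(4), XEHPC_BCS3_BCS4_INTR_MASK },
                        { BIT(5) | BIT(6), XEHPC_BCS5_BCS6_INTR_MASK },
                        { BIT(7) | BIT(8), XEHPC_BCS7_BCS8_INTR_MASK },
                };
                int i;

                for (i = 0; i < ARRAY_SIZE(pairs); i++)
                        if (bcs_mask & pairs[i].engines)
                                xe_mmio_write32(gt, pairs[i].mask_reg, ~0);
        }
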
static void xelp_irq_reset(struct xe_gt *gt) static void xelp_irq_reset(struct xe_gt *gt)
......
...@@ -153,13 +153,13 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si ...@@ -153,13 +153,13 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si
struct xe_gt *gt = xe_device_get_gt(xe, 0); struct xe_gt *gt = xe_device_get_gt(xe, 0);
struct pci_dev *pdev = to_pci_dev(xe->drm.dev); struct pci_dev *pdev = to_pci_dev(xe->drm.dev);
int err; int err;
u32 reg; u32 reg_val;
if (!xe->info.has_flat_ccs) { if (!xe->info.has_flat_ccs) {
*vram_size = pci_resource_len(pdev, GEN12_LMEM_BAR); *vram_size = pci_resource_len(pdev, GEN12_LMEM_BAR);
if (usable_size) if (usable_size)
*usable_size = min(*vram_size, *usable_size = min(*vram_size,
xe_mmio_read64(gt, GSMBASE.reg)); xe_mmio_read64(gt, GSMBASE));
return 0; return 0;
} }
...@@ -167,11 +167,11 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si ...@@ -167,11 +167,11 @@ int xe_mmio_total_vram_size(struct xe_device *xe, u64 *vram_size, u64 *usable_si
if (err) if (err)
return err; return err;
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE0_ADDR_RANGE); reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_TILE0_ADDR_RANGE);
*vram_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg) * SZ_1G; *vram_size = (u64)REG_FIELD_GET(GENMASK(14, 8), reg_val) * SZ_1G;
if (usable_size) { if (usable_size) {
reg = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR); reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR);
*usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg) * SZ_64K; *usable_size = (u64)REG_FIELD_GET(GENMASK(31, 8), reg_val) * SZ_64K;
drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n", drm_info(&xe->drm, "vram_size: 0x%llx usable_size: 0x%llx\n",
*vram_size, *usable_size); *vram_size, *usable_size);
} }
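
Note: XEHP_TILE0_ADDR_RANGE encodes the tile's VRAM span in GiB units in bits 14:8, and XEHP_FLAT_CCS_BASE_ADDR encodes a base in 64K units in bits 31:8. A worked example of the extraction, assuming the usual REG_FIELD_GET semantics (mask, then shift down):

        /* Suppose the MCR read returns 0x1000 (bit 12 set):
         *   GENMASK(14, 8) == 0x7f00
         *   REG_FIELD_GET(GENMASK(14, 8), 0x1000) == (0x1000 & 0x7f00) >> 8 == 0x10
         *   vram_size == 0x10 * SZ_1G == 16 GiB
         */
        u64 vram_size = (u64)REG_FIELD_GET(GENMASK(14, 8), 0x1000) * SZ_1G;
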
...@@ -298,7 +298,7 @@ static void xe_mmio_probe_tiles(struct xe_device *xe) ...@@ -298,7 +298,7 @@ static void xe_mmio_probe_tiles(struct xe_device *xe)
if (xe->info.tile_count == 1) if (xe->info.tile_count == 1)
return; return;
mtcfg = xe_mmio_read64(gt, XEHP_MTCFG_ADDR.reg); mtcfg = xe_mmio_read64(gt, XEHP_MTCFG_ADDR);
adj_tile_count = xe->info.tile_count = adj_tile_count = xe->info.tile_count =
REG_FIELD_GET(TILE_COUNT, mtcfg) + 1; REG_FIELD_GET(TILE_COUNT, mtcfg) + 1;
if (xe->info.media_verx100 >= 1300) if (xe->info.media_verx100 >= 1300)
...@@ -374,7 +374,7 @@ int xe_mmio_init(struct xe_device *xe) ...@@ -374,7 +374,7 @@ int xe_mmio_init(struct xe_device *xe)
* keep the GT powered down; we won't be able to communicate with it * keep the GT powered down; we won't be able to communicate with it
* and we should not continue with driver initialization. * and we should not continue with driver initialization.
*/ */
if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL.reg) & LMEM_INIT)) { if (IS_DGFX(xe) && !(xe_mmio_read32(gt, GU_CNTL) & LMEM_INIT)) {
drm_err(&xe->drm, "VRAM not initialized by firmware\n"); drm_err(&xe->drm, "VRAM not initialized by firmware\n");
return -ENODEV; return -ENODEV;
} }
...@@ -403,6 +403,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data, ...@@ -403,6 +403,7 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
struct xe_device *xe = to_xe_device(dev); struct xe_device *xe = to_xe_device(dev);
struct drm_xe_mmio *args = data; struct drm_xe_mmio *args = data;
unsigned int bits_flag, bytes; unsigned int bits_flag, bytes;
struct xe_reg reg;
bool allowed; bool allowed;
int ret = 0; int ret = 0;
...@@ -435,6 +436,12 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data, ...@@ -435,6 +436,12 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
if (XE_IOCTL_ERR(xe, args->addr + bytes > xe->mmio.size)) if (XE_IOCTL_ERR(xe, args->addr + bytes > xe->mmio.size))
return -EINVAL; return -EINVAL;
/*
* TODO: migrate to xe_gt_mcr to lookup the mmio range and handle
* multicast registers. Steering would need uapi extension.
*/
reg = XE_REG(args->addr);
xe_force_wake_get(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL); xe_force_wake_get(gt_to_fw(&xe->gt[0]), XE_FORCEWAKE_ALL);
if (args->flags & DRM_XE_MMIO_WRITE) { if (args->flags & DRM_XE_MMIO_WRITE) {
...@@ -444,10 +451,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data, ...@@ -444,10 +451,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL; ret = -EINVAL;
goto exit; goto exit;
} }
xe_mmio_write32(to_gt(xe), args->addr, args->value); xe_mmio_write32(to_gt(xe), reg, args->value);
break; break;
case DRM_XE_MMIO_64BIT: case DRM_XE_MMIO_64BIT:
xe_mmio_write64(to_gt(xe), args->addr, args->value); xe_mmio_write64(to_gt(xe), reg, args->value);
break; break;
default: default:
drm_dbg(&xe->drm, "Invalid MMIO bit size"); drm_dbg(&xe->drm, "Invalid MMIO bit size");
...@@ -462,10 +469,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data, ...@@ -462,10 +469,10 @@ int xe_mmio_ioctl(struct drm_device *dev, void *data,
if (args->flags & DRM_XE_MMIO_READ) { if (args->flags & DRM_XE_MMIO_READ) {
switch (bits_flag) { switch (bits_flag) {
case DRM_XE_MMIO_32BIT: case DRM_XE_MMIO_32BIT:
args->value = xe_mmio_read32(to_gt(xe), args->addr); args->value = xe_mmio_read32(to_gt(xe), reg);
break; break;
case DRM_XE_MMIO_64BIT: case DRM_XE_MMIO_64BIT:
args->value = xe_mmio_read64(to_gt(xe), args->addr); args->value = xe_mmio_read64(to_gt(xe), reg);
break; break;
default: default:
drm_dbg(&xe->drm, "Invalid MMIO bit size"); drm_dbg(&xe->drm, "Invalid MMIO bit size");
......
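
Note: from userspace the register address stays a plain u32 in struct drm_xe_mmio; only the kernel side wraps it with XE_REG(). A hedged usage sketch — the ioctl request macro name (DRM_IOCTL_XE_MMIO here), the uapi header path, and the register offset are assumptions; the addr/flags/value fields and the flag names are taken from the code above:

        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <drm/xe_drm.h>         /* uapi header path assumed */

        static int read_reg32(int fd, unsigned int addr, unsigned long long *out)
        {
                struct drm_xe_mmio args = {
                        .addr  = addr,  /* e.g. 0x101010, hypothetical offset */
                        .flags = DRM_XE_MMIO_READ | DRM_XE_MMIO_32BIT,
                };
                int ret = ioctl(fd, DRM_IOCTL_XE_MMIO, &args);

                if (!ret)
                        *out = args.value;
                return ret;
        }
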
...@@ -9,6 +9,7 @@ ...@@ -9,6 +9,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/io-64-nonatomic-lo-hi.h>
#include "regs/xe_reg_defs.h"
#include "xe_gt_types.h" #include "xe_gt_types.h"
struct drm_device; struct drm_device;
...@@ -17,33 +18,33 @@ struct xe_device; ...@@ -17,33 +18,33 @@ struct xe_device;
int xe_mmio_init(struct xe_device *xe); int xe_mmio_init(struct xe_device *xe);
static inline u8 xe_mmio_read8(struct xe_gt *gt, u32 reg) static inline u8 xe_mmio_read8(struct xe_gt *gt, struct xe_reg reg)
{ {
if (reg < gt->mmio.adj_limit) if (reg.reg < gt->mmio.adj_limit)
reg += gt->mmio.adj_offset; reg.reg += gt->mmio.adj_offset;
return readb(gt->mmio.regs + reg); return readb(gt->mmio.regs + reg.reg);
} }
static inline void xe_mmio_write32(struct xe_gt *gt, static inline void xe_mmio_write32(struct xe_gt *gt,
u32 reg, u32 val) struct xe_reg reg, u32 val)
{ {
if (reg < gt->mmio.adj_limit) if (reg.reg < gt->mmio.adj_limit)
reg += gt->mmio.adj_offset; reg.reg += gt->mmio.adj_offset;
writel(val, gt->mmio.regs + reg); writel(val, gt->mmio.regs + reg.reg);
} }
static inline u32 xe_mmio_read32(struct xe_gt *gt, u32 reg) static inline u32 xe_mmio_read32(struct xe_gt *gt, struct xe_reg reg)
{ {
if (reg < gt->mmio.adj_limit) if (reg.reg < gt->mmio.adj_limit)
reg += gt->mmio.adj_offset; reg.reg += gt->mmio.adj_offset;
return readl(gt->mmio.regs + reg); return readl(gt->mmio.regs + reg.reg);
} }
static inline u32 xe_mmio_rmw32(struct xe_gt *gt, u32 reg, u32 clr, static inline u32 xe_mmio_rmw32(struct xe_gt *gt, struct xe_reg reg, u32 clr,
u32 set) u32 set)
{ {
u32 old, reg_val; u32 old, reg_val;
...@@ -55,24 +56,24 @@ static inline u32 xe_mmio_rmw32(struct xe_gt *gt, u32 reg, u32 clr, ...@@ -55,24 +56,24 @@ static inline u32 xe_mmio_rmw32(struct xe_gt *gt, u32 reg, u32 clr,
} }
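
Note: the hunk elides the xe_mmio_rmw32() body; a sketch consistent with its "u32 old, reg_val" locals and the new struct xe_reg parameter — read, clear, set, write back, return the old value:

        static inline u32 xe_mmio_rmw32_sketch(struct xe_gt *gt, struct xe_reg reg,
                                               u32 clr, u32 set)
        {
                u32 old, reg_val;

                old = xe_mmio_read32(gt, reg); /* offset adjustment done inside */
                reg_val = (old & ~clr) | set;
                xe_mmio_write32(gt, reg, reg_val);

                return old;
        }
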
static inline void xe_mmio_write64(struct xe_gt *gt, static inline void xe_mmio_write64(struct xe_gt *gt,
u32 reg, u64 val) struct xe_reg reg, u64 val)
{ {
if (reg < gt->mmio.adj_limit) if (reg.reg < gt->mmio.adj_limit)
reg += gt->mmio.adj_offset; reg.reg += gt->mmio.adj_offset;
writeq(val, gt->mmio.regs + reg); writeq(val, gt->mmio.regs + reg.reg);
} }
static inline u64 xe_mmio_read64(struct xe_gt *gt, u32 reg) static inline u64 xe_mmio_read64(struct xe_gt *gt, struct xe_reg reg)
{ {
if (reg < gt->mmio.adj_limit) if (reg.reg < gt->mmio.adj_limit)
reg += gt->mmio.adj_offset; reg.reg += gt->mmio.adj_offset;
return readq(gt->mmio.regs + reg); return readq(gt->mmio.regs + reg.reg);
} }
static inline int xe_mmio_write32_and_verify(struct xe_gt *gt, static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
u32 reg, u32 val, struct xe_reg reg, u32 val,
u32 mask, u32 eval) u32 mask, u32 eval)
{ {
u32 reg_val; u32 reg_val;
...@@ -83,8 +84,9 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt, ...@@ -83,8 +84,9 @@ static inline int xe_mmio_write32_and_verify(struct xe_gt *gt,
return (reg_val & mask) != eval ? -EINVAL : 0; return (reg_val & mask) != eval ? -EINVAL : 0;
} }
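
Note: the elided middle of xe_mmio_write32_and_verify() is, in all likelihood, just a write followed by a read-back that feeds the comparison on its last line; reconstructed under that assumption:

        xe_mmio_write32(gt, reg, val);
        reg_val = xe_mmio_read32(gt, reg);
        /* then: return (reg_val & mask) != eval ? -EINVAL : 0; */
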
static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val, u32 mask, static inline int xe_mmio_wait32(struct xe_gt *gt, struct xe_reg reg, u32 val,
u32 timeout_us, u32 *out_val, bool atomic) u32 mask, u32 timeout_us, u32 *out_val,
bool atomic)
{ {
ktime_t cur = ktime_get_raw(); ktime_t cur = ktime_get_raw();
const ktime_t end = ktime_add_us(cur, timeout_us); const ktime_t end = ktime_add_us(cur, timeout_us);
...@@ -122,9 +124,10 @@ static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val, u32 mask, ...@@ -122,9 +124,10 @@ static inline int xe_mmio_wait32(struct xe_gt *gt, u32 reg, u32 val, u32 mask,
int xe_mmio_ioctl(struct drm_device *dev, void *data, int xe_mmio_ioctl(struct drm_device *dev, void *data,
struct drm_file *file); struct drm_file *file);
static inline bool xe_mmio_in_range(const struct xe_mmio_range *range, u32 reg) static inline bool xe_mmio_in_range(const struct xe_mmio_range *range,
struct xe_reg reg)
{ {
return range && reg >= range->start && reg <= range->end; return range && reg.reg >= range->start && reg.reg <= range->end;
} }
int xe_mmio_probe_vram(struct xe_device *xe); int xe_mmio_probe_vram(struct xe_device *xe);
......
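
Note: xe_mmio_wait32() above has its polling loop elided by the hunk; a sketch of that loop under the visible locals — poll with an exponentially widening delay until (read & mask) == val or the deadline passes, busy-waiting with udelay() when the caller is in atomic context:

        static inline int xe_mmio_wait32_sketch(struct xe_gt *gt, struct xe_reg reg,
                                                u32 val, u32 mask, u32 timeout_us,
                                                u32 *out_val, bool atomic)
        {
                ktime_t cur = ktime_get_raw();
                const ktime_t end = ktime_add_us(cur, timeout_us);
                int ret = -ETIMEDOUT;
                s64 wait = 10;
                u32 read;

                for (;;) {
                        read = xe_mmio_read32(gt, reg);
                        if ((read & mask) == val) {
                                ret = 0;
                                break;
                        }

                        cur = ktime_get_raw();
                        if (!ktime_before(cur, end))
                                break;

                        /* never sleep past the deadline */
                        if (ktime_after(ktime_add_us(cur, wait), end))
                                wait = ktime_us_delta(end, cur);

                        if (atomic)
                                udelay(wait);
                        else
                                usleep_range(wait, wait << 1);

                        wait <<= 1;
                }

                if (out_val)
                        *out_val = read;

                return ret;
        }
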
...@@ -477,8 +477,10 @@ static void __init_mocs_table(struct xe_gt *gt, ...@@ -477,8 +477,10 @@ static void __init_mocs_table(struct xe_gt *gt,
for (i = 0; for (i = 0;
i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0; i < info->n_entries ? (mocs = get_entry_control(info, i)), 1 : 0;
i++) { i++) {
mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, XE_REG(addr + i * 4).reg, mocs); struct xe_reg reg = XE_REG(addr + i * 4);
xe_mmio_write32(gt, XE_REG(addr + i * 4).reg, mocs);
mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, reg.reg, mocs);
xe_mmio_write32(gt, reg, mocs);
} }
} }
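
Note: the for-condition above uses the comma operator so that get_entry_control() runs before every iteration while the condition still yields a boolean; an equivalent, more conventional form (readability sketch only, not a proposed change):

        for (i = 0; i < info->n_entries; i++) {
                struct xe_reg reg = XE_REG(addr + i * 4);
                u32 mocs = get_entry_control(info, i);

                mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, reg.reg, mocs);
                xe_mmio_write32(gt, reg, mocs);
        }
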
...@@ -514,7 +516,7 @@ static void init_l3cc_table(struct xe_gt *gt, ...@@ -514,7 +516,7 @@ static void init_l3cc_table(struct xe_gt *gt,
i++) { i++) {
mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).reg, mocs_dbg(&gt->xe->drm, "%d 0x%x 0x%x\n", i, LNCFCMOCS(i).reg,
l3cc); l3cc);
xe_mmio_write32(gt, LNCFCMOCS(i).reg, l3cc); xe_mmio_write32(gt, LNCFCMOCS(i), l3cc);
} }
} }
......
...@@ -64,14 +64,20 @@ static const u32 mtl_pat_table[] = { ...@@ -64,14 +64,20 @@ static const u32 mtl_pat_table[] = {
static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries) static void program_pat(struct xe_gt *gt, const u32 table[], int n_entries)
{ {
for (int i = 0; i < n_entries; i++) for (int i = 0; i < n_entries; i++) {
xe_mmio_write32(gt, _PAT_INDEX(i), table[i]); struct xe_reg reg = XE_REG(_PAT_INDEX(i));
xe_mmio_write32(gt, reg, table[i]);
}
} }
static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries) static void program_pat_mcr(struct xe_gt *gt, const u32 table[], int n_entries)
{ {
for (int i = 0; i < n_entries; i++) for (int i = 0; i < n_entries; i++) {
xe_gt_mcr_multicast_write(gt, XE_REG_MCR(_PAT_INDEX(i)), table[i]); struct xe_reg_mcr reg_mcr = XE_REG_MCR(_PAT_INDEX(i));
xe_gt_mcr_multicast_write(gt, reg_mcr, table[i]);
}
} }
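
Note: XE_REG_MCR() wraps the same offset in a distinct type so steered (multicast) registers cannot be handed to the plain accessors by accident. A minimal sketch of what program_pat_mcr() assumes; the field name is illustrative and the real definition in regs/xe_reg_defs.h also sets the .mcr flag:

        struct xe_reg_mcr {
                struct xe_reg __reg;    /* underlying offset, with .mcr set */
        };

        #define XE_REG_MCR(r_) ((const struct xe_reg_mcr){      \
                .__reg = XE_REG(r_),                            \
        })
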
void xe_pat_init(struct xe_gt *gt) void xe_pat_init(struct xe_gt *gt)
......
...@@ -43,7 +43,7 @@ static int pcode_mailbox_status(struct xe_gt *gt) ...@@ -43,7 +43,7 @@ static int pcode_mailbox_status(struct xe_gt *gt)
lockdep_assert_held(&gt->pcode.lock); lockdep_assert_held(&gt->pcode.lock);
err = xe_mmio_read32(gt, PCODE_MAILBOX.reg) & PCODE_ERROR_MASK; err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) { if (err) {
drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err, drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown"); err_decode[err].str ?: "Unknown");
...@@ -60,22 +60,22 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1, ...@@ -60,22 +60,22 @@ static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
int err; int err;
lockdep_assert_held(&gt->pcode.lock); lockdep_assert_held(&gt->pcode.lock);
if ((xe_mmio_read32(gt, PCODE_MAILBOX.reg) & PCODE_READY) != 0) if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN; return -EAGAIN;
xe_mmio_write32(gt, PCODE_DATA0.reg, *data0); xe_mmio_write32(gt, PCODE_DATA0, *data0);
xe_mmio_write32(gt, PCODE_DATA1.reg, data1 ? *data1 : 0); xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
xe_mmio_write32(gt, PCODE_MAILBOX.reg, PCODE_READY | mbox); xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
err = xe_mmio_wait32(gt, PCODE_MAILBOX.reg, 0, PCODE_READY, err = xe_mmio_wait32(gt, PCODE_MAILBOX, 0, PCODE_READY,
timeout_ms * 1000, NULL, atomic); timeout_ms * 1000, NULL, atomic);
if (err) if (err)
return err; return err;
if (return_data) { if (return_data) {
*data0 = xe_mmio_read32(gt, PCODE_DATA0.reg); *data0 = xe_mmio_read32(gt, PCODE_DATA0);
if (data1) if (data1)
*data1 = xe_mmio_read32(gt, PCODE_DATA1.reg); *data1 = xe_mmio_read32(gt, PCODE_DATA1);
} }
return pcode_mailbox_status(gt); return pcode_mailbox_status(gt);
......
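
Note: the mailbox protocol above is: bail with -EAGAIN if READY is already set, write DATA0/DATA1, kick READY|mbox, wait for READY to clear, optionally read the data back, then decode the status field. A hedged caller sketch using the signature visible in the hunk header; the helper name is hypothetical and the trailing parameter order (timeout_ms, return_data, atomic) is inferred from the body:

        static int pcode_write_simple(struct xe_gt *gt, u32 mbox, u32 data)
        {
                int err;

                mutex_lock(&gt->pcode.lock);    /* pcode_mailbox_rw() asserts this */
                err = pcode_mailbox_rw(gt, mbox, &data, NULL, 1, false, false);
                mutex_unlock(&gt->pcode.lock);

                return err;
        }
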
...@@ -161,7 +161,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) ...@@ -161,7 +161,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
else if (entry->clr_bits + 1) else if (entry->clr_bits + 1)
val = (reg.mcr ? val = (reg.mcr ?
xe_gt_mcr_unicast_read_any(gt, reg_mcr) : xe_gt_mcr_unicast_read_any(gt, reg_mcr) :
xe_mmio_read32(gt, reg.reg)) & (~entry->clr_bits); xe_mmio_read32(gt, reg)) & (~entry->clr_bits);
else else
val = 0; val = 0;
...@@ -177,7 +177,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry) ...@@ -177,7 +177,7 @@ static void apply_one_mmio(struct xe_gt *gt, struct xe_reg_sr_entry *entry)
if (entry->reg.mcr) if (entry->reg.mcr)
xe_gt_mcr_multicast_write(gt, reg_mcr, val); xe_gt_mcr_multicast_write(gt, reg_mcr, val);
else else
xe_mmio_write32(gt, reg.reg, val); xe_mmio_write32(gt, reg, val);
} }
void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt) void xe_reg_sr_apply_mmio(struct xe_reg_sr *sr, struct xe_gt *gt)
...@@ -230,15 +230,17 @@ void xe_reg_sr_apply_whitelist(struct xe_reg_sr *sr, u32 mmio_base, ...@@ -230,15 +230,17 @@ void xe_reg_sr_apply_whitelist(struct xe_reg_sr *sr, u32 mmio_base,
p = drm_debug_printer(KBUILD_MODNAME); p = drm_debug_printer(KBUILD_MODNAME);
xa_for_each(&sr->xa, reg, entry) { xa_for_each(&sr->xa, reg, entry) {
xe_reg_whitelist_print_entry(&p, 0, reg, entry); xe_reg_whitelist_print_entry(&p, 0, reg, entry);
xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg, xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot),
reg | entry->set_bits); reg | entry->set_bits);
slot++; slot++;
} }
/* And clear the rest just in case of garbage */ /* And clear the rest just in case of garbage */
for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) for (; slot < RING_MAX_NONPRIV_SLOTS; slot++) {
xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot).reg, u32 addr = RING_NOPID(mmio_base).reg;
RING_NOPID(mmio_base).reg);
xe_mmio_write32(gt, RING_FORCE_TO_NONPRIV(mmio_base, slot), addr);
}
err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL); err = xe_force_wake_put(&gt->mmio.fw, XE_FORCEWAKE_ALL);
XE_WARN_ON(err); XE_WARN_ON(err);
......
...@@ -44,10 +44,11 @@ static u32 preparser_disable(bool state) ...@@ -44,10 +44,11 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | BIT(8) | state; return MI_ARB_CHECK | BIT(8) | state;
} }
static int emit_aux_table_inv(struct xe_gt *gt, u32 addr, u32 *dw, int i) static int emit_aux_table_inv(struct xe_gt *gt, struct xe_reg reg,
u32 *dw, int i)
{ {
dw[i++] = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN; dw[i++] = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
dw[i++] = addr + gt->mmio.adj_offset; dw[i++] = reg.reg + gt->mmio.adj_offset;
dw[i++] = AUX_INV; dw[i++] = AUX_INV;
dw[i++] = MI_NOOP; dw[i++] = MI_NOOP;
...@@ -203,9 +204,9 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc, ...@@ -203,9 +204,9 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
/* hsdes: 1809175790 */ /* hsdes: 1809175790 */
if (!xe->info.has_flat_ccs) { if (!xe->info.has_flat_ccs) {
if (decode) if (decode)
i = emit_aux_table_inv(gt, VD0_AUX_INV.reg, dw, i); i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
else else
i = emit_aux_table_inv(gt, VE0_AUX_INV.reg, dw, i); i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
} }
dw[i++] = preparser_disable(false); dw[i++] = preparser_disable(false);
...@@ -248,7 +249,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job, ...@@ -248,7 +249,7 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
/* hsdes: 1809175790 */ /* hsdes: 1809175790 */
if (!xe->info.has_flat_ccs) if (!xe->info.has_flat_ccs)
i = emit_aux_table_inv(gt, CCS_AUX_INV.reg, dw, i); i = emit_aux_table_inv(gt, CCS_AUX_INV, dw, i);
dw[i++] = preparser_disable(false); dw[i++] = preparser_disable(false);
......
...@@ -65,7 +65,7 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr) ...@@ -65,7 +65,7 @@ static s64 detect_bar2_dgfx(struct xe_device *xe, struct xe_ttm_stolen_mgr *mgr)
} }
/* Use DSM base address instead for stolen memory */ /* Use DSM base address instead for stolen memory */
mgr->stolen_base = xe_mmio_read64(gt, DSMBASE.reg) & BDSM_MASK; mgr->stolen_base = xe_mmio_read64(gt, DSMBASE) & BDSM_MASK;
if (drm_WARN_ON(&xe->drm, vram_size < mgr->stolen_base)) if (drm_WARN_ON(&xe->drm, vram_size < mgr->stolen_base))
return 0; return 0;
...@@ -88,7 +88,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr ...@@ -88,7 +88,7 @@ static u32 detect_bar2_integrated(struct xe_device *xe, struct xe_ttm_stolen_mgr
u32 stolen_size; u32 stolen_size;
u32 ggc, gms; u32 ggc, gms;
ggc = xe_mmio_read32(to_gt(xe), GGC.reg); ggc = xe_mmio_read32(to_gt(xe), GGC);
/* check GGMS, should be fixed 0x3 (8MB) */ /* check GGMS, should be fixed 0x3 (8MB) */
if (drm_WARN_ON(&xe->drm, (ggc & GGMS_MASK) != GGMS_MASK)) if (drm_WARN_ON(&xe->drm, (ggc & GGMS_MASK) != GGMS_MASK))
......
...@@ -462,33 +462,33 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags) ...@@ -462,33 +462,33 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
/* Set the source address for the uCode */ /* Set the source address for the uCode */
src_offset = uc_fw_ggtt_offset(uc_fw); src_offset = uc_fw_ggtt_offset(uc_fw);
xe_mmio_write32(gt, DMA_ADDR_0_LOW.reg, lower_32_bits(src_offset)); xe_mmio_write32(gt, DMA_ADDR_0_LOW, lower_32_bits(src_offset));
xe_mmio_write32(gt, DMA_ADDR_0_HIGH.reg, upper_32_bits(src_offset)); xe_mmio_write32(gt, DMA_ADDR_0_HIGH, upper_32_bits(src_offset));
/* Set the DMA destination */ /* Set the DMA destination */
xe_mmio_write32(gt, DMA_ADDR_1_LOW.reg, offset); xe_mmio_write32(gt, DMA_ADDR_1_LOW, offset);
xe_mmio_write32(gt, DMA_ADDR_1_HIGH.reg, DMA_ADDRESS_SPACE_WOPCM); xe_mmio_write32(gt, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
/* /*
* Set the transfer size. The header plus uCode will be copied to WOPCM * Set the transfer size. The header plus uCode will be copied to WOPCM
* via DMA, excluding any other components * via DMA, excluding any other components
*/ */
xe_mmio_write32(gt, DMA_COPY_SIZE.reg, xe_mmio_write32(gt, DMA_COPY_SIZE,
sizeof(struct uc_css_header) + uc_fw->ucode_size); sizeof(struct uc_css_header) + uc_fw->ucode_size);
/* Start the DMA */ /* Start the DMA */
xe_mmio_write32(gt, DMA_CTRL.reg, xe_mmio_write32(gt, DMA_CTRL,
_MASKED_BIT_ENABLE(dma_flags | START_DMA)); _MASKED_BIT_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */ /* Wait for DMA to finish */
ret = xe_mmio_wait32(gt, DMA_CTRL.reg, 0, START_DMA, 100000, &dma_ctrl, ret = xe_mmio_wait32(gt, DMA_CTRL, 0, START_DMA, 100000, &dma_ctrl,
false); false);
if (ret) if (ret)
drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n", drm_err(&xe->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl); xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */ /* Disable the bits once DMA is over */
xe_mmio_write32(gt, DMA_CTRL.reg, _MASKED_BIT_DISABLE(dma_flags)); xe_mmio_write32(gt, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
return ret; return ret;
} }
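
Note: _MASKED_BIT_ENABLE()/_MASKED_BIT_DISABLE() target self-masking registers, where the upper 16 bits select which of the lower 16 bits the write actually changes; untouched bits keep their value without a read-modify-write. A sketch of the helpers as inherited from i915 (definitions assumed, not part of this diff):

        /* upper half = write-enable mask, lower half = new bit values */
        #define _MASKED_FIELD(mask, value)      (((mask) << 16) | (value))
        #define _MASKED_BIT_ENABLE(a)           _MASKED_FIELD((a), (a))
        #define _MASKED_BIT_DISABLE(a)          _MASKED_FIELD((a), 0)
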
......
...@@ -124,8 +124,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size, ...@@ -124,8 +124,8 @@ static bool __check_layout(struct xe_device *xe, u32 wopcm_size,
static bool __wopcm_regs_locked(struct xe_gt *gt, static bool __wopcm_regs_locked(struct xe_gt *gt,
u32 *guc_wopcm_base, u32 *guc_wopcm_size) u32 *guc_wopcm_base, u32 *guc_wopcm_size)
{ {
u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET.reg); u32 reg_base = xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET);
u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE.reg); u32 reg_size = xe_mmio_read32(gt, GUC_WOPCM_SIZE);
if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) || if (!(reg_size & GUC_WOPCM_SIZE_LOCKED) ||
!(reg_base & GUC_WOPCM_OFFSET_VALID)) !(reg_base & GUC_WOPCM_OFFSET_VALID))
...@@ -152,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt, ...@@ -152,13 +152,13 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt,
XE_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); XE_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK);
mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED; mask = GUC_WOPCM_SIZE_MASK | GUC_WOPCM_SIZE_LOCKED;
err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE.reg, size, mask, err = xe_mmio_write32_and_verify(gt, GUC_WOPCM_SIZE, size, mask,
size | GUC_WOPCM_SIZE_LOCKED); size | GUC_WOPCM_SIZE_LOCKED);
if (err) if (err)
goto err_out; goto err_out;
mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent; mask = GUC_WOPCM_OFFSET_MASK | GUC_WOPCM_OFFSET_VALID | huc_agent;
err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET.reg, err = xe_mmio_write32_and_verify(gt, DMA_GUC_WOPCM_OFFSET,
base | huc_agent, mask, base | huc_agent, mask,
base | huc_agent | base | huc_agent |
GUC_WOPCM_OFFSET_VALID); GUC_WOPCM_OFFSET_VALID);
...@@ -171,10 +171,10 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt, ...@@ -171,10 +171,10 @@ static int __wopcm_init_regs(struct xe_device *xe, struct xe_gt *gt,
drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n"); drm_notice(&xe->drm, "Failed to init uC WOPCM registers!\n");
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET", drm_notice(&xe->drm, "%s(%#x)=%#x\n", "DMA_GUC_WOPCM_OFFSET",
DMA_GUC_WOPCM_OFFSET.reg, DMA_GUC_WOPCM_OFFSET.reg,
xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET.reg)); xe_mmio_read32(gt, DMA_GUC_WOPCM_OFFSET));
drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE", drm_notice(&xe->drm, "%s(%#x)=%#x\n", "GUC_WOPCM_SIZE",
GUC_WOPCM_SIZE.reg, GUC_WOPCM_SIZE.reg,
xe_mmio_read32(gt, GUC_WOPCM_SIZE.reg)); xe_mmio_read32(gt, GUC_WOPCM_SIZE));
return err; return err;
} }
......