Commit 4319382e authored by Daniele Ceraolo Spurio, committed by Chris Wilson

drm/i915: switch intel_uncore_forcewake_for_reg to intel_uncore

The intel_uncore structure is the owner of FW, so subclass the
function to it.

While at it, use a local uncore var and switch to the new read/write
functions where it makes sense.
Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Cc: Paulo Zanoni <paulo.r.zanoni@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190325214940.23632-7-daniele.ceraolospurio@intel.com
parent a2b4abfc
...@@ -327,6 +327,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu, ...@@ -327,6 +327,7 @@ int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{ {
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
struct intel_uncore *uncore = &dev_priv->uncore;
struct intel_vgpu_submission *s = &vgpu->submission; struct intel_vgpu_submission *s = &vgpu->submission;
enum forcewake_domains fw; enum forcewake_domains fw;
i915_reg_t reg; i915_reg_t reg;
...@@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id) ...@@ -351,21 +352,21 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
* otherwise device can go to RC6 state and interrupt invalidation * otherwise device can go to RC6 state and interrupt invalidation
* process * process
*/ */
fw = intel_uncore_forcewake_for_reg(dev_priv, reg, fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE); FW_REG_READ | FW_REG_WRITE);
if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9) if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
fw |= FORCEWAKE_RENDER; fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(&dev_priv->uncore, fw); intel_uncore_forcewake_get(uncore, fw);
I915_WRITE_FW(reg, 0x1); intel_uncore_write_fw(uncore, reg, 0x1);
if (wait_for_atomic((I915_READ_FW(reg) == 0), 50)) if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id); gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
else else
vgpu_vreg_t(vgpu, reg) = 0; vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(&dev_priv->uncore, fw); intel_uncore_forcewake_put(uncore, fw);
gvt_dbg_core("invalidate TLB for ring %d\n", ring_id); gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
} }
......
...@@ -888,6 +888,7 @@ static inline u32 ...@@ -888,6 +888,7 @@ static inline u32
read_subslice_reg(struct drm_i915_private *dev_priv, int slice, read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
int subslice, i915_reg_t reg) int subslice, i915_reg_t reg)
{ {
struct intel_uncore *uncore = &dev_priv->uncore;
u32 mcr_slice_subslice_mask; u32 mcr_slice_subslice_mask;
u32 mcr_slice_subslice_select; u32 mcr_slice_subslice_select;
u32 default_mcr_s_ss_select; u32 default_mcr_s_ss_select;
...@@ -909,33 +910,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice, ...@@ -909,33 +910,33 @@ read_subslice_reg(struct drm_i915_private *dev_priv, int slice,
default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv); default_mcr_s_ss_select = intel_calculate_mcr_s_ss_select(dev_priv);
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, fw_domains = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ); FW_REG_READ);
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, fw_domains |= intel_uncore_forcewake_for_reg(uncore,
GEN8_MCR_SELECTOR, GEN8_MCR_SELECTOR,
FW_REG_READ | FW_REG_WRITE); FW_REG_READ | FW_REG_WRITE);
spin_lock_irq(&dev_priv->uncore.lock); spin_lock_irq(&uncore->lock);
intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains); intel_uncore_forcewake_get__locked(uncore, fw_domains);
mcr = I915_READ_FW(GEN8_MCR_SELECTOR); mcr = intel_uncore_read_fw(uncore, GEN8_MCR_SELECTOR);
WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) != WARN_ON_ONCE((mcr & mcr_slice_subslice_mask) !=
default_mcr_s_ss_select); default_mcr_s_ss_select);
mcr &= ~mcr_slice_subslice_mask; mcr &= ~mcr_slice_subslice_mask;
mcr |= mcr_slice_subslice_select; mcr |= mcr_slice_subslice_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
ret = I915_READ_FW(reg); ret = intel_uncore_read_fw(uncore, reg);
mcr &= ~mcr_slice_subslice_mask; mcr &= ~mcr_slice_subslice_mask;
mcr |= default_mcr_s_ss_select; mcr |= default_mcr_s_ss_select;
I915_WRITE_FW(GEN8_MCR_SELECTOR, mcr); intel_uncore_write_fw(uncore, GEN8_MCR_SELECTOR, mcr);
intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains); intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irq(&dev_priv->uncore.lock); spin_unlock_irq(&uncore->lock);
return ret; return ret;
} }
......
...@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc) ...@@ -54,7 +54,7 @@ void intel_guc_init_send_regs(struct intel_guc *guc)
BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT); BUILD_BUG_ON(GUC_MAX_MMIO_MSG_LEN > SOFT_SCRATCH_COUNT);
for (i = 0; i < guc->send_regs.count; i++) { for (i = 0; i < guc->send_regs.count; i++) {
fw_domains |= intel_uncore_forcewake_for_reg(dev_priv, fw_domains |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
guc_send_reg(guc, i), guc_send_reg(guc, i),
FW_REG_READ | FW_REG_WRITE); FW_REG_READ | FW_REG_WRITE);
} }
......
...@@ -9959,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, ...@@ -9959,6 +9959,7 @@ static u64 vlv_residency_raw(struct drm_i915_private *dev_priv,
u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
const i915_reg_t reg) const i915_reg_t reg)
{ {
struct intel_uncore *uncore = &dev_priv->uncore;
u64 time_hw, prev_hw, overflow_hw; u64 time_hw, prev_hw, overflow_hw;
unsigned int fw_domains; unsigned int fw_domains;
unsigned long flags; unsigned long flags;
...@@ -9980,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, ...@@ -9980,10 +9981,10 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency)))
return 0; return 0;
fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
spin_lock_irqsave(&dev_priv->uncore.lock, flags); spin_lock_irqsave(&uncore->lock, flags);
intel_uncore_forcewake_get__locked(&dev_priv->uncore, fw_domains); intel_uncore_forcewake_get__locked(uncore, fw_domains);
/* On VLV and CHV, residency time is in CZ units rather than 1.28us */ /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
...@@ -10002,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, ...@@ -10002,7 +10003,7 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
} }
overflow_hw = BIT_ULL(32); overflow_hw = BIT_ULL(32);
time_hw = I915_READ_FW(reg); time_hw = intel_uncore_read_fw(uncore, reg);
} }
/* /*
...@@ -10024,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, ...@@ -10024,8 +10025,8 @@ u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv,
time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; time_hw += dev_priv->gt_pm.rc6.cur_residency[i];
dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; dev_priv->gt_pm.rc6.cur_residency[i] = time_hw;
intel_uncore_forcewake_put__locked(&dev_priv->uncore, fw_domains); intel_uncore_forcewake_put__locked(uncore, fw_domains);
spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); spin_unlock_irqrestore(&uncore->lock, flags);
return mul_u64_u32_div(time_hw, mul, div); return mul_u64_u32_div(time_hw, mul, div);
} }
......
...@@ -1841,7 +1841,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv, ...@@ -1841,7 +1841,7 @@ int __intel_wait_for_register(struct drm_i915_private *dev_priv,
{ {
struct intel_uncore *uncore = &dev_priv->uncore; struct intel_uncore *uncore = &dev_priv->uncore;
unsigned fw = unsigned fw =
intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
u32 reg_value; u32 reg_value;
int ret; int ret;
...@@ -1904,23 +1904,23 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore) ...@@ -1904,23 +1904,23 @@ intel_uncore_arm_unclaimed_mmio_detection(struct intel_uncore *uncore)
} }
static enum forcewake_domains static enum forcewake_domains
intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, intel_uncore_forcewake_for_read(struct intel_uncore *uncore,
i915_reg_t reg) i915_reg_t reg)
{ {
struct intel_uncore *uncore = &dev_priv->uncore; struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg); u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains; enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) { if (INTEL_GEN(i915) >= 11) {
fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset); fw_domains = __gen11_fwtable_reg_read_fw_domains(uncore, offset);
} else if (HAS_FWTABLE(dev_priv)) { } else if (HAS_FWTABLE(i915)) {
fw_domains = __fwtable_reg_read_fw_domains(uncore, offset); fw_domains = __fwtable_reg_read_fw_domains(uncore, offset);
} else if (INTEL_GEN(dev_priv) >= 6) { } else if (INTEL_GEN(i915) >= 6) {
fw_domains = __gen6_reg_read_fw_domains(uncore, offset); fw_domains = __gen6_reg_read_fw_domains(uncore, offset);
} else { } else {
/* on devices with FW we expect to hit one of the above cases */ /* on devices with FW we expect to hit one of the above cases */
if (intel_uncore_has_forcewake(uncore)) if (intel_uncore_has_forcewake(uncore))
MISSING_CASE(INTEL_GEN(dev_priv)); MISSING_CASE(INTEL_GEN(i915));
fw_domains = 0; fw_domains = 0;
} }
...@@ -1931,25 +1931,25 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv, ...@@ -1931,25 +1931,25 @@ intel_uncore_forcewake_for_read(struct drm_i915_private *dev_priv,
} }
static enum forcewake_domains static enum forcewake_domains
intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, intel_uncore_forcewake_for_write(struct intel_uncore *uncore,
i915_reg_t reg) i915_reg_t reg)
{ {
struct intel_uncore *uncore = &dev_priv->uncore; struct drm_i915_private *i915 = uncore_to_i915(uncore);
u32 offset = i915_mmio_reg_offset(reg); u32 offset = i915_mmio_reg_offset(reg);
enum forcewake_domains fw_domains; enum forcewake_domains fw_domains;
if (INTEL_GEN(dev_priv) >= 11) { if (INTEL_GEN(i915) >= 11) {
fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset); fw_domains = __gen11_fwtable_reg_write_fw_domains(uncore, offset);
} else if (HAS_FWTABLE(dev_priv) && !IS_VALLEYVIEW(dev_priv)) { } else if (HAS_FWTABLE(i915) && !IS_VALLEYVIEW(i915)) {
fw_domains = __fwtable_reg_write_fw_domains(uncore, offset); fw_domains = __fwtable_reg_write_fw_domains(uncore, offset);
} else if (IS_GEN(dev_priv, 8)) { } else if (IS_GEN(i915, 8)) {
fw_domains = __gen8_reg_write_fw_domains(uncore, offset); fw_domains = __gen8_reg_write_fw_domains(uncore, offset);
} else if (IS_GEN_RANGE(dev_priv, 6, 7)) { } else if (IS_GEN_RANGE(i915, 6, 7)) {
fw_domains = FORCEWAKE_RENDER; fw_domains = FORCEWAKE_RENDER;
} else { } else {
/* on devices with FW we expect to hit one of the above cases */ /* on devices with FW we expect to hit one of the above cases */
if (intel_uncore_has_forcewake(uncore)) if (intel_uncore_has_forcewake(uncore))
MISSING_CASE(INTEL_GEN(dev_priv)); MISSING_CASE(INTEL_GEN(i915));
fw_domains = 0; fw_domains = 0;
} }
...@@ -1962,7 +1962,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, ...@@ -1962,7 +1962,7 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
/** /**
* intel_uncore_forcewake_for_reg - which forcewake domains are needed to access * intel_uncore_forcewake_for_reg - which forcewake domains are needed to access
* a register * a register
* @dev_priv: pointer to struct drm_i915_private * @uncore: pointer to struct intel_uncore
* @reg: register in question * @reg: register in question
* @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE * @op: operation bitmask of FW_REG_READ and/or FW_REG_WRITE
* *
...@@ -1974,21 +1974,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv, ...@@ -1974,21 +1974,21 @@ intel_uncore_forcewake_for_write(struct drm_i915_private *dev_priv,
* callers to do FIFO management on their own or risk losing writes. * callers to do FIFO management on their own or risk losing writes.
*/ */
enum forcewake_domains enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op) i915_reg_t reg, unsigned int op)
{ {
enum forcewake_domains fw_domains = 0; enum forcewake_domains fw_domains = 0;
WARN_ON(!op); WARN_ON(!op);
if (!intel_uncore_has_forcewake(&dev_priv->uncore)) if (!intel_uncore_has_forcewake(uncore))
return 0; return 0;
if (op & FW_REG_READ) if (op & FW_REG_READ)
fw_domains = intel_uncore_forcewake_for_read(dev_priv, reg); fw_domains = intel_uncore_forcewake_for_read(uncore, reg);
if (op & FW_REG_WRITE) if (op & FW_REG_WRITE)
fw_domains |= intel_uncore_forcewake_for_write(dev_priv, reg); fw_domains |= intel_uncore_forcewake_for_write(uncore, reg);
return fw_domains; return fw_domains;
} }
......
...@@ -193,7 +193,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore, ...@@ -193,7 +193,7 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id); const char *intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id);
enum forcewake_domains enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv, intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
i915_reg_t reg, unsigned int op); i915_reg_t reg, unsigned int op);
#define FW_REG_READ (1) #define FW_REG_READ (1)
#define FW_REG_WRITE (2) #define FW_REG_WRITE (2)
......
...@@ -905,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv, ...@@ -905,7 +905,7 @@ wal_get_fw_for_rmw(struct drm_i915_private *dev_priv,
unsigned int i; unsigned int i;
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
fw |= intel_uncore_forcewake_for_reg(dev_priv, fw |= intel_uncore_forcewake_for_reg(&dev_priv->uncore,
wa->reg, wa->reg,
FW_REG_READ | FW_REG_READ |
FW_REG_WRITE); FW_REG_WRITE);
......
...@@ -184,7 +184,7 @@ static int live_forcewake_ops(void *arg) ...@@ -184,7 +184,7 @@ static int live_forcewake_ops(void *arg)
if (!engine->default_state) if (!engine->default_state)
continue; continue;
fw_domains = intel_uncore_forcewake_for_reg(i915, mmio, fw_domains = intel_uncore_forcewake_for_reg(uncore, mmio,
FW_REG_READ); FW_REG_READ);
if (!fw_domains) if (!fw_domains)
continue; continue;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment