Commit dab3aff7 authored by Ville Syrjälä

drm/i915: Remove cnl pre-prod workarounds

Remove all the stepping-dependent cnl workarounds. Bspec lists
more steppings than this, so presumably these are classed as
pre-production. And this is cnl after all, so no one should
really care anyway.
Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200430125822.21985-2-ville.syrjala@linux.intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 25444ca6
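For context on what "stepping dependent" means here: every check removed below gates a workaround on the device's revision (stepping) falling inside a range, via IS_CNL_REVID(). A minimal, self-contained sketch of that pattern follows; the struct, helper, and numeric revision values are illustrative stand-ins for the kernel's actual definitions, not copied from them.

/*
 * Illustrative model of the stepping-gated workaround pattern removed by
 * this commit. The names echo the i915 macros (IS_CNL_REVID, CNL_REVID_*),
 * but this struct, helper and the revision values are assumptions made
 * only for the sketch.
 */
#include <stdbool.h>
#include <stdio.h>

#define CNL_REVID_A0 0x0	/* illustrative stepping values */
#define CNL_REVID_B0 0x1
#define CNL_REVID_C0 0x2

struct i915_device {
	bool is_cannonlake;
	unsigned int revid;	/* PCI revision ID encodes the stepping */
};

/* True when the device is CNL and its stepping lies in [since, until]. */
static bool is_cnl_revid(const struct i915_device *i915,
			 unsigned int since, unsigned int until)
{
	return i915->is_cannonlake &&
	       i915->revid >= since && i915->revid <= until;
}

int main(void)
{
	struct i915_device dev = { .is_cannonlake = true, .revid = CNL_REVID_B0 };

	/* Pre-production-only workarounds were gated exactly like this. */
	if (is_cnl_revid(&dev, CNL_REVID_A0, CNL_REVID_C0))
		printf("apply pre-production CNL workaround\n");

	return 0;
}

Dropping such range checks lets the driver assume production steppings only, which is what the diff below does for every workaround tagged "cnl (pre-prod)".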
@@ -113,7 +113,6 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
 	struct intel_uncore *uncore = rc6_to_uncore(rc6);
 	struct intel_engine_cs *engine;
 	enum intel_engine_id id;
-	u32 rc6_mode;
 
 	/* 2b: Program RC6 thresholds.*/
 	if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) {
@@ -165,16 +164,11 @@ static void gen9_rc6_enable(struct intel_rc6 *rc6)
 
 	/* 3a: Enable RC6 */
 	set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */
 
-	/* WaRsUseTimeoutMode:cnl (pre-prod) */
-	if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0))
-		rc6_mode = GEN7_RC_CTL_TO_MODE;
-	else
-		rc6_mode = GEN6_RC_CTL_EI_MODE(1);
 	rc6->ctl_enable =
 		GEN6_RC_CTL_HW_ENABLE |
 		GEN6_RC_CTL_RC6_ENABLE |
-		rc6_mode;
+		GEN6_RC_CTL_EI_MODE(1);
 
 	/*
 	 * WaRsDisableCoarsePowerGating:skl,cnl
@@ -485,25 +485,14 @@ static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
 static void cnl_ctx_workarounds_init(struct intel_engine_cs *engine,
 				     struct i915_wa_list *wal)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	/* WaForceContextSaveRestoreNonCoherent:cnl */
 	WA_SET_BIT_MASKED(CNL_HDC_CHICKEN0,
 			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT);
 
-	/* WaThrottleEUPerfToAvoidTDBackPressure:cnl(pre-prod) */
-	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
-		WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, THROTTLE_12_5);
-
 	/* WaDisableReplayBufferBankArbitrationOptimization:cnl */
 	WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
 			  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
-	/* WaDisableEnhancedSBEVertexCaching:cnl (pre-prod) */
-	if (IS_CNL_REVID(i915, 0, CNL_REVID_B0))
-		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-				  GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE);
-
 	/* WaPushConstantDereferenceHoldDisable:cnl */
 	WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, PUSH_CONSTANT_DEREF_DISABLE);
@@ -872,12 +861,6 @@ cnl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
 {
 	wa_init_mcr(i915, wal);
 
-	/* WaDisableI2mCycleOnWRPort:cnl (pre-prod) */
-	if (IS_CNL_REVID(i915, CNL_REVID_B0, CNL_REVID_B0))
-		wa_write_or(wal,
-			    GAMT_CHKN_BIT_REG,
-			    GAMT_CHKN_DISABLE_I2M_CYCLE_ON_WR_PORT);
-
 	/* WaInPlaceDecompressionHang:cnl */
 	wa_write_or(wal,
 		    GEN9_GAMT_ECO_REG_RW_IA,
@@ -5255,10 +5255,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
 			trans_offset_b;
 	} else {
 		res_blocks = wm0_sel_res_b + trans_offset_b;
-
-		/* WA BUG:1938466 add one block for non y-tile planes */
-		if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
-			res_blocks += 1;
 	}
 
 	/*
@@ -6978,9 +6974,6 @@ static void cnl_init_clock_gating(struct drm_i915_private *dev_priv)
 	val = I915_READ(SLICE_UNIT_LEVEL_CLKGATE);
 	/* ReadHitWriteOnlyDisable:cnl */
 	val |= RCCUNIT_CLKGATE_DIS;
-	/* WaSarbUnitClockGatingDisable:cnl (pre-prod) */
-	if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_B0))
-		val |= SARBUNIT_CLKGATE_DIS;
 	I915_WRITE(SLICE_UNIT_LEVEL_CLKGATE, val);
 
 	/* Wa_2201832410:cnl */
@@ -149,8 +149,7 @@ static bool check_hw_restrictions(struct drm_i915_private *i915,
 			       guc_wopcm_size))
 		return false;
 
-	if ((IS_GEN(i915, 9) ||
-	     IS_CNL_REVID(i915, CNL_REVID_A0, CNL_REVID_A0)) &&
+	if (IS_GEN(i915, 9) &&
 	    !gen9_check_huc_fw_fits(i915, guc_wopcm_size, huc_fw_size))
 		return false;