Commit cad7d8d9 authored by Dave Airlie


Merge tag 'topic/kbl-4.7-fixes-2016-07-18' of git://anongit.freedesktop.org/drm-intel into drm-fixes

As promised, here's the pile of kbl cherry-picks assembled by Mika & Rodrigo.
It's a bit much, but it's all well contained to kbl code and has been tested
for a while in drm-intel-next. It's still kept as a separate pull in case you
think it's too much, but in that case I think we'd need to disable kbl by
default again in 4.7 (which would be annoying too).

* tag 'topic/kbl-4.7-fixes-2016-07-18' of git://anongit.freedesktop.org/drm-intel: (28 commits)
  drm/i915/kbl: Introduce the first official DMC for Kabylake.
  drm/i915: Introduce Kabypoint PCH for Kabylake H/DT.
  drm/i915/gen9: implement WaConextSwitchWithConcurrentTLBInvalidate
  drm/i915/gen9: Add WaFbcHighMemBwCorruptionAvoidance
  drm/i195/fbc: Add WaFbcNukeOnHostModify
  drm/i915/gen9: Add WaFbcWakeMemOn
  drm/i915/gen9: Add WaFbcTurnOffFbcWatermark
  drm/i915/kbl: Add WaClearSlmSpaceAtContextSwitch
  drm/i915/gen9: Add WaEnableChickenDCPR
  drm/i915/kbl: Add WaDisableSbeCacheDispatchPortSharing
  drm/i915/kbl: Add WaDisableGafsUnitClkGating
  drm/i915/kbl: Add WaForGAMHang
  drm/i915: Add WaInsertDummyPushConstP for bxt and kbl
  drm/i915/kbl: Add WaDisableDynamicCreditSharing
  drm/i915/kbl: Add WaDisableGamClockGating
  drm/i915/gen9: Enable must set chicken bits in config0 reg
  drm/i915/kbl: Add WaDisableLSQCROPERFforOCL
  drm/i915/kbl: Add WaDisableSDEUnitClockGating
  drm/i915/kbl: Add WaDisableFenceDestinationToSLM for A0
  drm/i915/kbl: Add WaEnableGapsTsvCreditFix
  ...
parents 84ade45e a4a027a8
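
Most of the entries above are Kabylake workarounds that only apply to pre-production steppings; the series introduces the KBL_REVID_* values and the IS_KBL_REVID() helper (see the i915_drv.h hunk below) and gates each workaround on them. A minimal illustrative sketch of the pattern, taken from the kbl_init_workarounds() hunk further down:

	/* WaDisableDynamicCreditSharing:kbl -- only needed up to the B0 stepping */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
		WA_SET_BIT(GAMT_CHKN_BIT_REG,
			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
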
@@ -512,6 +512,10 @@ void intel_detect_pch(struct drm_device *dev)
 			DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n");
 			WARN_ON(!IS_SKYLAKE(dev) &&
 				!IS_KABYLAKE(dev));
+		} else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) {
+			dev_priv->pch_type = PCH_KBP;
+			DRM_DEBUG_KMS("Found KabyPoint PCH\n");
+			WARN_ON(!IS_KABYLAKE(dev));
 		} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
 			   (id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
 			   ((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
...
@@ -990,6 +990,7 @@ enum intel_pch {
 	PCH_CPT,	/* Cougarpoint PCH */
 	PCH_LPT,	/* Lynxpoint PCH */
 	PCH_SPT,	/* Sunrisepoint PCH */
+	PCH_KBP,	/* Kabypoint PCH */
 	PCH_NOP,
 };
@@ -2600,6 +2601,15 @@ struct drm_i915_cmd_table {
 #define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
 
+#define KBL_REVID_A0		0x0
+#define KBL_REVID_B0		0x1
+#define KBL_REVID_C0		0x2
+#define KBL_REVID_D0		0x3
+#define KBL_REVID_E0		0x4
+
+#define IS_KBL_REVID(p, since, until) \
+	(IS_KABYLAKE(p) && IS_REVID(p, since, until))
+
 /*
  * The genX designation typically refers to the render engine, so render
  * capability related checks should use IS_GEN, while display and other checks
@@ -2708,11 +2718,13 @@ struct drm_i915_cmd_table {
 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE		0x9c00
 #define INTEL_PCH_SPT_DEVICE_ID_TYPE		0xA100
 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE		0x9D00
+#define INTEL_PCH_KBP_DEVICE_ID_TYPE		0xA200
 #define INTEL_PCH_P2X_DEVICE_ID_TYPE		0x7100
 #define INTEL_PCH_P3X_DEVICE_ID_TYPE		0x7000
 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE		0x2900 /* qemu q35 has 2918 */
 
 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type)
+#define HAS_PCH_KBP(dev) (INTEL_PCH_TYPE(dev) == PCH_KBP)
 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT)
 #define HAS_PCH_LPT(dev) (INTEL_PCH_TYPE(dev) == PCH_LPT)
 #define HAS_PCH_LPT_LP(dev) (__I915__(dev)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE)
...
@@ -55,8 +55,10 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
 		return -ENODEV;
 
 	/* See the comment at the drm_mm_init() call for more about this check.
-	 * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */
-	if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096)
+	 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+	 */
+	if (start < 4096 && (IS_GEN8(dev_priv) ||
+	    IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
 		start = 4096;
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
...
@@ -2471,7 +2471,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
 			I915_WRITE(SDEIIR, iir);
 			ret = IRQ_HANDLED;
 
-			if (HAS_PCH_SPT(dev_priv))
+			if (HAS_PCH_SPT(dev_priv) || HAS_PCH_KBP(dev_priv))
 				spt_irq_handler(dev, iir);
 			else
 				cpt_irq_handler(dev, iir);
@@ -4661,7 +4661,7 @@ void intel_irq_init(struct drm_i915_private *dev_priv)
 		dev->driver->disable_vblank = gen8_disable_vblank;
 		if (IS_BROXTON(dev))
 			dev_priv->display.hpd_irq_setup = bxt_hpd_irq_setup;
-		else if (HAS_PCH_SPT(dev))
+		else if (HAS_PCH_SPT(dev) || HAS_PCH_KBP(dev))
 			dev_priv->display.hpd_irq_setup = spt_hpd_irq_setup;
 		else
 			dev_priv->display.hpd_irq_setup = ilk_hpd_irq_setup;
...
@@ -220,6 +220,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   ECOCHK_PPGTT_WT_HSW		(0x2<<3)
 #define   ECOCHK_PPGTT_WB_HSW		(0x3<<3)
 
+#define GEN8_CONFIG0			_MMIO(0xD00)
+#define  GEN9_DEFAULT_FIXES		(1 << 3 | 1 << 2 | 1 << 1)
+
 #define GAC_ECO_BITS			_MMIO(0x14090)
 #define   ECOBITS_SNB_BIT		(1<<13)
 #define   ECOBITS_PPGTT_CACHE64B	(3<<8)
@@ -1669,6 +1672,9 @@ enum skl_disp_power_wells {
 
 #define GEN7_TLB_RD_ADDR	_MMIO(0x4700)
 
+#define GAMT_CHKN_BIT_REG	_MMIO(0x4ab8)
+#define   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING	(1<<28)
+
 #if 0
 #define PRB0_TAIL	_MMIO(0x2030)
 #define PRB0_HEAD	_MMIO(0x2034)
@@ -1804,6 +1810,10 @@ enum skl_disp_power_wells {
 #define GEN9_IZ_HASHING_MASK(slice)	(0x3 << ((slice) * 2))
 #define GEN9_IZ_HASHING(slice, val)	((val) << ((slice) * 2))
 
+/* chicken reg for WaConextSwitchWithConcurrentTLBInvalidate */
+#define GEN9_CSFE_CHICKEN1_RCS	_MMIO(0x20D4)
+#define   GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE	(1 << 2)
+
 /* WaClearTdlStateAckDirtyBits */
 #define GEN8_STATE_ACK		_MMIO(0x20F0)
 #define GEN9_STATE_ACK_SLICE1	_MMIO(0x20F8)
@@ -2200,6 +2210,8 @@ enum skl_disp_power_wells {
 #define ILK_DPFC_STATUS		_MMIO(0x43210)
 #define ILK_DPFC_FENCE_YOFF	_MMIO(0x43218)
 #define ILK_DPFC_CHICKEN	_MMIO(0x43224)
+#define   ILK_DPFC_DISABLE_DUMMY0	(1<<8)
+#define   ILK_DPFC_NUKE_ON_ANY_MODIFICATION	(1<<23)
 #define ILK_FBC_RT_BASE		_MMIO(0x2128)
 #define   ILK_FBC_RT_VALID	(1<<0)
 #define   SNB_FBC_FRONT_BUFFER	(1<<1)
@@ -6031,6 +6043,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PAR1_1		_MMIO(0x42080)
 #define  DPA_MASK_VBLANK_SRD	(1 << 15)
 #define  FORCE_ARB_IDLE_PLANES	(1 << 14)
+#define  SKL_EDP_PSR_FIX_RDWRAP	(1 << 3)
 
 #define _CHICKEN_PIPESL_1_A	0x420b0
 #define _CHICKEN_PIPESL_1_B	0x420b4
@@ -6039,6 +6052,7 @@ enum skl_disp_power_wells {
 #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
 
 #define DISP_ARB_CTL	_MMIO(0x45000)
+#define  DISP_FBC_MEMORY_WAKE		(1<<31)
 #define  DISP_TILE_SURFACE_SWIZZLING	(1<<13)
 #define  DISP_FBC_WM_DIS		(1<<15)
 #define DISP_ARB_CTL2	_MMIO(0x45004)
@@ -6052,6 +6066,9 @@ enum skl_disp_power_wells {
 #define HSW_NDE_RSTWRN_OPT	_MMIO(0x46408)
 #define  RESET_PCH_HANDSHAKE_ENABLE	(1<<4)
 
+#define GEN8_CHICKEN_DCPR_1		_MMIO(0x46430)
+#define   MASK_WAKEMEM			(1<<13)
+
 #define SKL_DFSM			_MMIO(0x51000)
 #define SKL_DFSM_CDCLK_LIMIT_MASK	(3 << 23)
 #define SKL_DFSM_CDCLK_LIMIT_675	(0 << 23)
@@ -6069,6 +6086,7 @@ enum skl_disp_power_wells {
 #define  GEN9_TSG_BARRIER_ACK_DISABLE		(1<<8)
 
 #define GEN9_CS_DEBUG_MODE1		_MMIO(0x20ec)
+#define GEN9_CTX_PREEMPT_REG		_MMIO(0x2248)
 #define GEN8_CS_CHICKEN1		_MMIO(0x2580)
 
 /* GEN7 chicken */
@@ -6076,6 +6094,7 @@ enum skl_disp_power_wells {
 # define GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC	((1<<10) | (1<<26))
 # define GEN9_RHWO_OPTIMIZATION_DISABLE		(1<<14)
 #define COMMON_SLICE_CHICKEN2			_MMIO(0x7014)
+# define GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION (1<<8)
 # define GEN8_CSC2_SBE_VUE_CACHE_CONSERVATIVE	(1<<0)
 
 #define HIZ_CHICKEN				_MMIO(0x7018)
@@ -6921,6 +6940,7 @@ enum skl_disp_power_wells {
 #define EDRAM_SETS_IDX(cap)			(((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1				_MMIO(0x9400)
+# define GEN6_GAMUNIT_CLOCK_GATE_DISABLE	(1 << 22)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE	(1 << 16)
 # define GEN6_BLBUNIT_CLOCK_GATE_DISABLE	(1 << 5)
 # define GEN6_CSUNIT_CLOCK_GATE_DISABLE		(1 << 7)
@@ -6937,6 +6957,7 @@ enum skl_disp_power_wells {
 #define GEN7_UCGCTL4				_MMIO(0x940c)
 #define  GEN7_L3BANK2X_CLOCK_GATE_DISABLE	(1<<25)
+#define  GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE	(1<<14)
 
 #define GEN6_RCGCTL1				_MMIO(0x9410)
 #define GEN6_RCGCTL2				_MMIO(0x9414)
...
@@ -41,16 +41,22 @@
  * be moved to FW_FAILED.
  */
 
+#define I915_CSR_KBL "i915/kbl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_KBL);
+#define KBL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 1)
+
 #define I915_CSR_SKL "i915/skl_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_SKL);
+#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+
 #define I915_CSR_BXT "i915/bxt_dmc_ver1.bin"
+MODULE_FIRMWARE(I915_CSR_BXT);
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
 #define FIRMWARE_URL  "https://01.org/linuxgraphics/intel-linux-graphics-firmwares"
 
-MODULE_FIRMWARE(I915_CSR_SKL);
-MODULE_FIRMWARE(I915_CSR_BXT);
-
-#define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
-#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
-
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -169,12 +175,10 @@ struct stepping_info {
 	char substepping;
 };
 
-/*
- * Kabylake derivated from Skylake H0, so SKL H0
- * is the right firmware for KBL A0 (revid 0).
- */
 static const struct stepping_info kbl_stepping_info[] = {
-	{'H', '0'}, {'I', '0'}
+	{'A', '0'}, {'B', '0'}, {'C', '0'},
+	{'D', '0'}, {'E', '0'}, {'F', '0'},
+	{'G', '0'}, {'H', '0'}, {'I', '0'},
 };
 
 static const struct stepping_info skl_stepping_info[] = {
@@ -298,7 +302,9 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	csr->version = css_header->version;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_KABYLAKE(dev_priv)) {
+		required_min_version = KBL_CSR_VERSION_REQUIRED;
+	} else if (IS_SKYLAKE(dev_priv)) {
 		required_min_version = SKL_CSR_VERSION_REQUIRED;
 	} else if (IS_BROXTON(dev_priv)) {
 		required_min_version = BXT_CSR_VERSION_REQUIRED;
@@ -446,7 +452,9 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_KABYLAKE(dev_priv))
+		csr->fw_path = I915_CSR_KBL;
+	else if (IS_SKYLAKE(dev_priv))
 		csr->fw_path = I915_CSR_SKL;
 	else if (IS_BROXTON(dev_priv))
 		csr->fw_path = I915_CSR_BXT;
...
@@ -1103,15 +1103,17 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
 				    uint32_t *const batch,
 				    uint32_t index)
 {
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
 	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
 
 	/*
-	 * WaDisableLSQCROPERFforOCL:skl
+	 * WaDisableLSQCROPERFforOCL:skl,kbl
 	 * This WA is implemented in skl_init_clock_gating() but since
 	 * this batch updates GEN8_L3SQCREG4 with default value we need to
 	 * set this bit here to retain the WA during flush.
 	 */
-	if (IS_SKL_REVID(engine->dev, 0, SKL_REVID_E0))
+	if (IS_SKL_REVID(dev_priv, 0, SKL_REVID_E0) ||
+	    IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
 		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
 
 	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
@@ -1273,6 +1275,7 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 {
 	int ret;
 	struct drm_device *dev = engine->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	uint32_t index = wa_ctx_start(wa_ctx, *offset, CACHELINE_DWORDS);
 
 	/* WaDisableCtxRestoreArbitration:skl,bxt */
@@ -1286,6 +1289,22 @@ static int gen9_init_indirectctx_bb(struct intel_engine_cs *engine,
 		return ret;
 	index = ret;
 
+	/* WaClearSlmSpaceAtContextSwitch:kbl */
+	/* Actual scratch location is at 128 bytes offset */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)) {
+		uint32_t scratch_addr
+			= engine->scratch.gtt_offset + 2*CACHELINE_BYTES;
+
+		wa_ctx_emit(batch, index, GFX_OP_PIPE_CONTROL(6));
+		wa_ctx_emit(batch, index, (PIPE_CONTROL_FLUSH_L3 |
+					   PIPE_CONTROL_GLOBAL_GTT_IVB |
+					   PIPE_CONTROL_CS_STALL |
+					   PIPE_CONTROL_QW_WRITE));
+		wa_ctx_emit(batch, index, scratch_addr);
+		wa_ctx_emit(batch, index, 0);
+		wa_ctx_emit(batch, index, 0);
+		wa_ctx_emit(batch, index, 0);
+	}
+
 	/* Pad to end of cacheline */
 	while (index % CACHELINE_DWORDS)
 		wa_ctx_emit(batch, index, MI_NOOP);
@@ -1687,9 +1706,10 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 	struct intel_ringbuffer *ringbuf = request->ringbuf;
 	struct intel_engine_cs *engine = ringbuf->engine;
 	u32 scratch_addr = engine->scratch.gtt_offset + 2 * CACHELINE_BYTES;
-	bool vf_flush_wa = false;
+	bool vf_flush_wa = false, dc_flush_wa = false;
 	u32 flags = 0;
 	int ret;
+	int len;
 
 	flags |= PIPE_CONTROL_CS_STALL;
@@ -1716,9 +1736,21 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		 */
 		if (IS_GEN9(engine->dev))
 			vf_flush_wa = true;
+
+		/* WaForGAMHang:kbl */
+		if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
+			dc_flush_wa = true;
 	}
 
-	ret = intel_ring_begin(request, vf_flush_wa ? 12 : 6);
+	len = 6;
+
+	if (vf_flush_wa)
+		len += 6;
+
+	if (dc_flush_wa)
+		len += 12;
+
+	ret = intel_ring_begin(request, len);
 	if (ret)
 		return ret;
@@ -1731,12 +1763,31 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 		intel_logical_ring_emit(ringbuf, 0);
 	}
 
+	if (dc_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_DC_FLUSH_ENABLE);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
 	intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
 	intel_logical_ring_emit(ringbuf, flags);
 	intel_logical_ring_emit(ringbuf, scratch_addr);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, 0);
 	intel_logical_ring_emit(ringbuf, 0);
+
+	if (dc_flush_wa) {
+		intel_logical_ring_emit(ringbuf, GFX_OP_PIPE_CONTROL(6));
+		intel_logical_ring_emit(ringbuf, PIPE_CONTROL_CS_STALL);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+		intel_logical_ring_emit(ringbuf, 0);
+	}
+
 	intel_logical_ring_advance(ringbuf);
 
 	return 0;
...
@@ -1731,7 +1731,8 @@ intel_panel_init_backlight_funcs(struct intel_panel *panel)
 		panel->backlight.set = bxt_set_backlight;
 		panel->backlight.get = bxt_get_backlight;
 		panel->backlight.hz_to_pwm = bxt_hz_to_pwm;
-	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv)) {
+	} else if (HAS_PCH_LPT(dev_priv) || HAS_PCH_SPT(dev_priv) ||
+		   HAS_PCH_KBP(dev_priv)) {
 		panel->backlight.setup = lpt_setup_backlight;
 		panel->backlight.enable = lpt_enable_backlight;
 		panel->backlight.disable = lpt_disable_backlight;
...
@@ -54,10 +54,38 @@
 #define INTEL_RC6p_ENABLE			(1<<1)
 #define INTEL_RC6pp_ENABLE			(1<<2)
 
+static void gen9_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	/* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl */
+	I915_WRITE(CHICKEN_PAR1_1,
+		   I915_READ(CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP);
+
+	I915_WRITE(GEN8_CONFIG0,
+		   I915_READ(GEN8_CONFIG0) | GEN9_DEFAULT_FIXES);
+
+	/* WaEnableChickenDCPR:skl,bxt,kbl */
+	I915_WRITE(GEN8_CHICKEN_DCPR_1,
+		   I915_READ(GEN8_CHICKEN_DCPR_1) | MASK_WAKEMEM);
+
+	/* WaFbcTurnOffFbcWatermark:skl,bxt,kbl */
+	/* WaFbcWakeMemOn:skl,bxt,kbl */
+	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
+		   DISP_FBC_WM_DIS |
+		   DISP_FBC_MEMORY_WAKE);
+
+	/* WaFbcHighMemBwCorruptionAvoidance:skl,bxt,kbl */
+	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+		   ILK_DPFC_DISABLE_DUMMY0);
+}
+
 static void bxt_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
+	gen9_init_clock_gating(dev);
+
 	/* WaDisableSDEUnitClockGating:bxt */
 	I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
 		   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -6698,6 +6726,38 @@ static void lpt_suspend_hw(struct drm_device *dev)
 	}
 }
 
+static void kabylake_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen9_init_clock_gating(dev);
+
+	/* WaDisableSDEUnitClockGating:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) |
+			   GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableGamClockGating:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		I915_WRITE(GEN6_UCGCTL1, I915_READ(GEN6_UCGCTL1) |
+			   GEN6_GAMUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaFbcNukeOnHostModify:kbl */
+	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
+static void skylake_init_clock_gating(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	gen9_init_clock_gating(dev);
+
+	/* WaFbcNukeOnHostModify:skl */
+	I915_WRITE(ILK_DPFC_CHICKEN, I915_READ(ILK_DPFC_CHICKEN) |
+		   ILK_DPFC_NUKE_ON_ANY_MODIFICATION);
+}
+
 static void broadwell_init_clock_gating(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -7163,9 +7223,9 @@ static void nop_init_clock_gating(struct drm_device *dev)
 void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv)
 {
 	if (IS_SKYLAKE(dev_priv))
-		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+		dev_priv->display.init_clock_gating = skylake_init_clock_gating;
 	else if (IS_KABYLAKE(dev_priv))
-		dev_priv->display.init_clock_gating = nop_init_clock_gating;
+		dev_priv->display.init_clock_gating = kabylake_init_clock_gating;
 	else if (IS_BROXTON(dev_priv))
 		dev_priv->display.init_clock_gating = bxt_init_clock_gating;
 	else if (IS_BROADWELL(dev_priv))
...
@@ -913,24 +913,26 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 {
 	struct drm_device *dev = engine->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t tmp;
 	int ret;
 
-	/* WaEnableLbsSlaRetryTimerDecrement:skl */
+	/* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
+	I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
+
+	/* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
 	I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
 		   GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
 
-	/* WaDisableKillLogic:bxt,skl */
+	/* WaDisableKillLogic:bxt,skl,kbl */
 	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
 		   ECOCHK_DIS_TLB);
 
-	/* WaClearFlowControlGpgpuContextSave:skl,bxt */
-	/* WaDisablePartialInstShootdown:skl,bxt */
+	/* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
+	/* WaDisablePartialInstShootdown:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
 			  FLOW_CONTROL_ENABLE |
 			  PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
 
-	/* Syncing dependencies between camera and graphics:skl,bxt */
+	/* Syncing dependencies between camera and graphics:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 			  GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
@@ -952,18 +954,18 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	 */
 	}
 
-	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt */
-	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt */
+	/* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl */
+	/* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
 			  GEN9_ENABLE_YV12_BUGFIX |
 			  GEN9_ENABLE_GPGPU_PREEMPTION);
 
-	/* Wa4x4STCOptimizationDisable:skl,bxt */
-	/* WaDisablePartialResolveInVc:skl,bxt */
+	/* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
+	/* WaDisablePartialResolveInVc:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
 					 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
 
-	/* WaCcsTlbPrefetchDisable:skl,bxt */
+	/* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
 	WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
 			  GEN9_CCS_TLB_PREFETCH_ENABLE);
@@ -973,31 +975,57 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
 	WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
 			  PIXEL_MASK_CAMMING_DISABLE);
 
-	/* WaForceContextSaveRestoreNonCoherent:skl,bxt */
-	tmp = HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT;
-	if (IS_SKL_REVID(dev, SKL_REVID_F0, REVID_FOREVER) ||
-	    IS_BXT_REVID(dev, BXT_REVID_B0, REVID_FOREVER))
-		tmp |= HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE;
-	WA_SET_BIT_MASKED(HDC_CHICKEN0, tmp);
+	/* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
+			  HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
 
-	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt */
-	if (IS_SKYLAKE(dev) || IS_BXT_REVID(dev, 0, BXT_REVID_B0))
+	/* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
+	 * both tied to WaForceContextSaveRestoreNonCoherent
+	 * in some hsds for skl. We keep the tie for all gen9. The
+	 * documentation is a bit hazy and so we want to get common behaviour,
+	 * even though there is no clear evidence we would need both on kbl/bxt.
+	 * This area has been source of system hangs so we play it safe
+	 * and mimic the skl regardless of what bspec says.
+	 *
+	 * Use Force Non-Coherent whenever executing a 3D context. This
+	 * is a workaround for a possible hang in the unlikely event
+	 * a TLB invalidation occurs during a PSD flush.
+	 */
+
+	/* WaForceEnableNonCoherent:skl,bxt,kbl */
+	WA_SET_BIT_MASKED(HDC_CHICKEN0,
+			  HDC_FORCE_NON_COHERENT);
+
+	/* WaDisableHDCInvalidation:skl,bxt,kbl */
+	I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
+		   BDW_DISABLE_HDC_INVALIDATION);
+
+	/* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
+	if (IS_SKYLAKE(dev_priv) ||
+	    IS_KABYLAKE(dev_priv) ||
+	    IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
 		WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
 				  GEN8_SAMPLER_POWER_BYPASS_DIS);
 
-	/* WaDisableSTUnitPowerOptimization:skl,bxt */
+	/* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
 	WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
 
-	/* WaOCLCoherentLineFlush:skl,bxt */
+	/* WaOCLCoherentLineFlush:skl,bxt,kbl */
 	I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
 				    GEN8_LQSC_FLUSH_COHERENT_LINES));
 
-	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt */
+	/* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
+	ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
+	if (ret)
+		return ret;
+
+	/* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
 	ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
 	if (ret)
 		return ret;
 
-	/* WaAllowUMDToModifyHDCChicken1:skl,bxt */
+	/* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
 	ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
 	if (ret)
 		return ret;
@@ -1092,22 +1120,6 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 		WA_SET_BIT_MASKED(HIZ_CHICKEN,
 				  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
-	/* This is tied to WaForceContextSaveRestoreNonCoherent */
-	if (IS_SKL_REVID(dev, 0, REVID_FOREVER)) {
-		/*
-		 *Use Force Non-Coherent whenever executing a 3D context. This
-		 * is a workaround for a possible hang in the unlikely event
-		 * a TLB invalidation occurs during a PSD flush.
-		 */
-		/* WaForceEnableNonCoherent:skl */
-		WA_SET_BIT_MASKED(HDC_CHICKEN0,
-				  HDC_FORCE_NON_COHERENT);
-
-		/* WaDisableHDCInvalidation:skl */
-		I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
-			   BDW_DISABLE_HDC_INVALIDATION);
-	}
-
 	/* WaBarrierPerformanceFixDisable:skl */
 	if (IS_SKL_REVID(dev, SKL_REVID_C0, SKL_REVID_D0))
 		WA_SET_BIT_MASKED(HDC_CHICKEN0,
@@ -1120,6 +1132,9 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 			GEN7_HALF_SLICE_CHICKEN1,
 			GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
 
+	/* WaDisableGafsUnitClkGating:skl */
+	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
 	/* WaDisableLSQCROPERFforOCL:skl */
 	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
 	if (ret)
@@ -1174,6 +1189,63 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine)
 			return ret;
 	}
 
+	/* WaInsertDummyPushConstPs:bxt */
+	if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	return 0;
+}
+
+static int kbl_init_workarounds(struct intel_engine_cs *engine)
+{
+	struct drm_i915_private *dev_priv = engine->dev->dev_private;
+	int ret;
+
+	ret = gen9_init_workarounds(engine);
+	if (ret)
+		return ret;
+
+	/* WaEnableGapsTsvCreditFix:kbl */
+	I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
+				   GEN9_GAPS_TSV_CREDIT_DISABLE));
+
+	/* WaDisableDynamicCreditSharing:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		WA_SET_BIT(GAMT_CHKN_BIT_REG,
+			   GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
+
+	/* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
+	if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
+		WA_SET_BIT_MASKED(HDC_CHICKEN0,
+				  HDC_FENCE_DEST_SLM_DISABLE);
+
+	/* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
+	 * involving this register should also be added to WA batch as required.
+	 */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
+		/* WaDisableLSQCROPERFforOCL:kbl */
+		I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
+			   GEN8_LQSC_RO_PERF_DIS);
+
+	/* WaInsertDummyPushConstPs:kbl */
+	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
+		WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+				  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+
+	/* WaDisableGafsUnitClkGating:kbl */
+	WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
+
+	/* WaDisableSbeCacheDispatchPortSharing:kbl */
+	WA_SET_BIT_MASKED(
+		GEN7_HALF_SLICE_CHICKEN1,
+		GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
+
+	/* WaDisableLSQCROPERFforOCL:kbl */
+	ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
+	if (ret)
+		return ret;
+
 	return 0;
 }
@@ -1199,6 +1271,9 @@ int init_workarounds_ring(struct intel_engine_cs *engine)
 	if (IS_BROXTON(dev))
 		return bxt_init_workarounds(engine);
 
+	if (IS_KABYLAKE(dev_priv))
+		return kbl_init_workarounds(engine);
+
 	return 0;
 }
...