Commit bef7d196 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2015-08-20' of git://anongit.freedesktop.org/drm-intel into drm-fixes

Revert of a VBT parsing commit that should've been queued for drm-next,
not v4.2. The revert unbreaks Braswell among other things.

Also on Braswell removal of DP HBR2/TP3 and intermediate eDP frequency
support. The code was optimistically added based on incorrect
documentation; the platform does not support them. These are cc: stable.

Finally a gpu state fix from Chris, also cc: stable.

* tag 'drm-intel-fixes-2015-08-20' of git://anongit.freedesktop.org/drm-intel:
  drm/i915: Avoid TP3 on CHV
  drm/i915: remove HBR2 from chv supported list
  Revert "drm/i915: Add eDP intermediate frequencies for CHV"
  Revert "drm/i915: Allow parsing of variable size child device entries from VBT"
  drm/i915: Flag the execlists context object as dirty after every use
parents 1c73d3b1 ed63baaf
@@ -1075,34 +1075,15 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	const union child_device_config *p_child;
 	union child_device_config *child_dev_ptr;
 	int i, child_device_num, count;
-	u8 expected_size;
 	u16 block_size;
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
 		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
-	if (bdb->version < 195) {
-		expected_size = 33;
-	} else if (bdb->version == 195) {
-		expected_size = 37;
-	} else if (bdb->version <= 197) {
-		expected_size = 38;
-	} else {
-		expected_size = 38;
-		DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
-				 expected_size, bdb->version);
-	}
-	if (expected_size > sizeof(*p_child)) {
-		DRM_ERROR("child_device_config cannot fit in p_child\n");
-		return;
-	}
-	if (p_defs->child_dev_size != expected_size) {
-		DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
-			  p_defs->child_dev_size, expected_size, bdb->version);
+	if (p_defs->child_dev_size < sizeof(*p_child)) {
+		DRM_ERROR("General definiton block child device size is too small.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -1149,7 +1130,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 		child_dev_ptr = dev_priv->vbt.child_dev + count;
 		count++;
-		memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
+		memcpy(child_dev_ptr, p_child, sizeof(*p_child));
 	}
 
 	return;
 }
...
@@ -93,9 +93,6 @@ static const struct dp_link_dpll chv_dpll[] = {
 
 static const int skl_rates[] = { 162000, 216000, 270000,
 				  324000, 432000, 540000 };
-static const int chv_rates[] = { 162000, 202500, 210000, 216000,
-				 243000, 270000, 324000, 405000,
-				 420000, 432000, 540000 };
 static const int default_rates[] = { 162000, 270000, 540000 };
 
 /**
@@ -1169,24 +1166,31 @@ intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
 	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
 }
 
+static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
+{
+	/* WaDisableHBR2:skl */
+	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
+		return false;
+
+	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
+	    (INTEL_INFO(dev)->gen >= 9))
+		return true;
+	else
+		return false;
+}
+
 static int
 intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
 {
 	if (IS_SKYLAKE(dev)) {
 		*source_rates = skl_rates;
 		return ARRAY_SIZE(skl_rates);
-	} else if (IS_CHERRYVIEW(dev)) {
-		*source_rates = chv_rates;
-		return ARRAY_SIZE(chv_rates);
 	}
 
 	*source_rates = default_rates;
 
-	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
-		/* WaDisableHBR2:skl */
-		return (DP_LINK_BW_2_7 >> 3) + 1;
-	else if (INTEL_INFO(dev)->gen >= 8 ||
-	    (IS_HASWELL(dev) && !IS_HSW_ULX(dev)))
+	/* This depends on the fact that 5.4 is last value in the array */
+	if (intel_dp_source_supports_hbr2(dev))
 		return (DP_LINK_BW_5_4 >> 3) + 1;
 	else
 		return (DP_LINK_BW_2_7 >> 3) + 1;
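
As an aside on the restored "depends on the fact that 5.4 is last value in the array" comment: the standard DPCD link-bandwidth codes (0x06, 0x0a and 0x14 for 1.62, 2.7 and 5.4 GHz, as defined in drm_dp_helper.h) map onto a count of usable entries in default_rates[] via "(code >> 3) + 1". A minimal standalone sketch of that arithmetic, for illustration only and not part of this patch:

	#include <stdio.h>

	#define DP_LINK_BW_1_62	0x06
	#define DP_LINK_BW_2_7	0x0a
	#define DP_LINK_BW_5_4	0x14

	static const int default_rates[] = { 162000, 270000, 540000 };

	int main(void)
	{
		/* (code >> 3) + 1 gives the number of default_rates[] entries
		 * usable at that link bandwidth: 1, 2 and 3 respectively,
		 * which only works because 540000 (5.4 GHz) is last. */
		printf("default_rates has %zu entries\n",
		       sizeof(default_rates) / sizeof(default_rates[0]));
		printf("1.62 GHz -> %d rate(s)\n", (DP_LINK_BW_1_62 >> 3) + 1);
		printf("2.70 GHz -> %d rate(s)\n", (DP_LINK_BW_2_7 >> 3) + 1);
		printf("5.40 GHz -> %d rate(s)\n", (DP_LINK_BW_5_4 >> 3) + 1);
		return 0;
	}
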
@@ -3941,10 +3945,15 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
 		}
 	}
 
-	/* Training Pattern 3 support, both source and sink */
+	/* Training Pattern 3 support, Intel platforms that support HBR2 alone
+	 * have support for TP3 hence that check is used along with dpcd check
+	 * to ensure TP3 can be enabled.
+	 * SKL < B0: due it's WaDisableHBR2 is the only exception where TP3 is
+	 * supported but still not enabled.
+	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
 	    intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
-	    (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
+	    intel_dp_source_supports_hbr2(dev)) {
 		intel_dp->use_tps3 = true;
 		DRM_DEBUG_KMS("Displayport TPS3 supported\n");
 	} else
...
@@ -1012,6 +1012,8 @@ static int intel_lr_context_pin(struct intel_engine_cs *ring,
 		ret = intel_pin_and_map_ringbuffer_obj(ring->dev, ringbuf);
 		if (ret)
 			goto unpin_ctx_obj;
+
+		ctx_obj->dirty = true;
 	}
 
 	return ret;
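
For context on the one-line execlists fix above: the hardware writes register state back into the context object on every use, and an object that is not flagged dirty can have its pages dropped rather than written back when it is evicted, losing that state. A toy, self-contained sketch of the idea, using hypothetical demo types and helpers rather than the i915 eviction path:

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical stand-in for an object's backing-storage state. */
	struct demo_obj {
		const char *name;
		bool dirty;	/* set whenever the GPU may have written to it */
	};

	/* Hypothetical eviction path: clean pages may simply be discarded. */
	static void demo_evict(struct demo_obj *obj)
	{
		if (obj->dirty)
			printf("%s: dirty, contents preserved before eviction\n",
			       obj->name);
		else
			printf("%s: clean, pages dropped (GPU-written state lost)\n",
			       obj->name);
		obj->dirty = false;
	}

	int main(void)
	{
		struct demo_obj ctx = { .name = "execlists context", .dirty = false };

		/* The hardware updated the context image, so flag it dirty;
		 * otherwise the eviction path above would discard it. */
		ctx.dirty = true;
		demo_evict(&ctx);
		return 0;
	}
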