Commit 04d49f36 authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.13-rc6' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Seems to be slowing down nicely, just one amdgpu fix, and a bunch of
  i915 fixes"

* tag 'drm-fixes-for-v4.13-rc6' of git://people.freedesktop.org/~airlied/linux:
  drm/amdgpu: save list length when fence is signaled
  drm/i915: Avoid the gpu reset vs. modeset deadlock
  drm/i915: Suppress switch_mm emission between the same aliasing_ppgtt
  drm/i915: Return correct EDP voltage swing table for 0.85V
  drm/i915/cnl: Add slice and subslice information to debugfs.
  drm/i915: Perform an invalidate prior to executing golden renderstate
  drm/i915: remove unused function declaration
parents d33a2a91 28eb4628
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -244,6 +244,12 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 		struct dma_fence *f = e->fence;
 		struct amd_sched_fence *s_fence = to_amd_sched_fence(f);
 
+		if (dma_fence_is_signaled(f)) {
+			hash_del(&e->node);
+			dma_fence_put(f);
+			kmem_cache_free(amdgpu_sync_slab, e);
+			continue;
+		}
 		if (ring && s_fence) {
 			/* For fences from the same ring it is sufficient
 			 * when they are scheduled.
@@ -256,13 +262,6 @@ struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
 			}
 		}
 
-		if (dma_fence_is_signaled(f)) {
-			hash_del(&e->node);
-			dma_fence_put(f);
-			kmem_cache_free(amdgpu_sync_slab, e);
-			continue;
-		}
-
 		return f;
 	}
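For readers skimming the fix: signaled fences are now pruned before the scheduled-fence shortcut can return them, so a stale entry can no longer be handed back to the caller. A minimal userspace sketch of the same prune-then-peek pattern, using toy types rather than the kernel's dma_fence API:

#include <stdbool.h>
#include <stdlib.h>

/* Toy stand-in for a fence entry; hypothetical, not the kernel types. */
struct fence {
	bool signaled;
	struct fence *next;
};

/* Return the first unsignaled fence, unlinking and freeing signaled
 * entries on the way, mirroring the ordering the fix establishes:
 * prune signaled entries *before* any early-return shortcut can see
 * them. */
static struct fence *peek_fence(struct fence **head)
{
	struct fence **pp = head;

	while (*pp) {
		struct fence *f = *pp;

		if (f->signaled) {
			*pp = f->next;	/* hash_del() analogue */
			free(f);	/* dma_fence_put() + slab free analogue */
			continue;
		}
		return f;		/* first live fence */
	}
	return NULL;
}

The kernel version does the same walk over a hashtable with hash_for_each_safe(); the toy list just keeps the ordering visible.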
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -4580,7 +4580,7 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv,
 		sseu->slice_mask |= BIT(s);
 
-		if (IS_GEN9_BC(dev_priv))
+		if (IS_GEN9_BC(dev_priv) || IS_CANNONLAKE(dev_priv))
 			sseu->subslice_mask =
 				INTEL_INFO(dev_priv)->sseu.subslice_mask;
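The one-liner reuses the gen9 reporting path for Cannonlake, taking the enabled subslices from the static sseu info. The masks decode into the counts debugfs prints via hweight(); a tiny standalone sketch with made-up mask values (illustration only, not the driver code):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int slice_mask = BIT(0);			/* 1 slice enabled */
	unsigned int subslice_mask = BIT(0) | BIT(1) | BIT(2);	/* 3 subslices */

	/* __builtin_popcount is the userspace analogue of hweight32(). */
	printf("slice total: %d\n", __builtin_popcount(slice_mask));
	printf("subslice per slice: %d\n", __builtin_popcount(subslice_mask));
	return 0;
}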
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -688,19 +688,19 @@ static inline bool skip_rcs_switch(struct i915_hw_ppgtt *ppgtt,
 }
 
 static bool
-needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt,
-		  struct intel_engine_cs *engine,
-		  struct i915_gem_context *to)
+needs_pd_load_pre(struct i915_hw_ppgtt *ppgtt, struct intel_engine_cs *engine)
 {
+	struct i915_gem_context *from = engine->legacy_active_context;
+
 	if (!ppgtt)
 		return false;
 
 	/* Always load the ppgtt on first use */
-	if (!engine->legacy_active_context)
+	if (!from)
 		return true;
 
 	/* Same context without new entries, skip */
-	if (engine->legacy_active_context == to &&
+	if ((!from->ppgtt || from->ppgtt == ppgtt) &&
 	    !(intel_engine_flag(engine) & ppgtt->pd_dirty_rings))
 		return false;
 
@@ -744,7 +744,7 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (skip_rcs_switch(ppgtt, engine, to))
 		return 0;
 
-	if (needs_pd_load_pre(ppgtt, engine, to)) {
+	if (needs_pd_load_pre(ppgtt, engine)) {
 		/* Older GENs and non render rings still want the load first,
 		 * "PP_DCLV followed by PP_DIR_BASE register through Load
 		 * Register Immediate commands in Ring Buffer before submitting
@@ -841,7 +841,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 		struct i915_hw_ppgtt *ppgtt =
 			to->ppgtt ?: req->i915->mm.aliasing_ppgtt;
 
-		if (needs_pd_load_pre(ppgtt, engine, to)) {
+		if (needs_pd_load_pre(ppgtt, engine)) {
 			int ret;
 
 			trace_switch_mm(engine, to);
@@ -852,6 +852,7 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 			ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
 		}
 
+		engine->legacy_active_context = to;
 		return 0;
 	}
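The net effect is that needs_pd_load_pre() now compares address spaces instead of context pointers: switching between two contexts that both run on the shared aliasing ppgtt no longer emits a redundant switch_mm. A hedged sketch of that decision with stub types standing in for the driver structures (names are illustrative, not the i915 headers):

#include <stdbool.h>
#include <stddef.h>

/* Stub types modeling only the fields the decision needs. */
struct toy_ppgtt { int id; };
struct toy_context { struct toy_ppgtt *ppgtt; };	/* NULL => aliasing ppgtt */

/* Mirror of the rewritten needs_pd_load_pre() logic: reload the page
 * directories on first use, and skip the reload when the last context
 * ran on the same (or the shared aliasing) ppgtt and nothing is dirty. */
static bool needs_pd_load(const struct toy_ppgtt *target,
			  const struct toy_context *last,
			  bool pd_dirty)
{
	if (!target)
		return false;	/* no ppgtt at all, nothing to load */
	if (!last)
		return true;	/* first use on this engine */
	if ((!last->ppgtt || last->ppgtt == target) && !pd_dirty)
		return false;	/* same address space, still clean */
	return true;
}

Note the `target` passed in follows the kernel's `to->ppgtt ?: aliasing_ppgtt` convention, which is why a NULL `last->ppgtt` (previous context on the aliasing ppgtt) can match it.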
--- a/drivers/gpu/drm/i915/i915_gem_render_state.c
+++ b/drivers/gpu/drm/i915/i915_gem_render_state.c
@@ -242,6 +242,10 @@ int i915_gem_render_state_emit(struct drm_i915_gem_request *req)
 		goto err_unpin;
 	}
 
+	ret = req->engine->emit_flush(req, EMIT_INVALIDATE);
+	if (ret)
+		goto err_unpin;
+
 	ret = req->engine->emit_bb_start(req,
 					 so->batch_offset, so->batch_size,
 					 I915_DISPATCH_SECURE);
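The added emit_flush(req, EMIT_INVALIDATE) makes the golden render-state batch follow the same rule as a normal execbuf: invalidate caches before the batch runs so it cannot consume stale data. A toy sketch of that emit ordering with hypothetical stand-in functions:

#include <stdio.h>

/* Stand-ins for engine->emit_flush() and engine->emit_bb_start();
 * hypothetical names, illustration only. */
static int emit_flush(void)    { puts("flush: invalidate caches"); return 0; }
static int emit_bb_start(void) { puts("bb_start: golden render state"); return 0; }

static int render_state_emit(void)
{
	int ret;

	/* Invalidate first so the batch cannot read stale cachelines,
	 * mirroring the ordering the fix enforces. */
	ret = emit_flush();
	if (ret)
		return ret;

	return emit_bb_start();
}

int main(void)
{
	return render_state_emit();
}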
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1762,7 +1762,7 @@ cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv,
 	if (dev_priv->vbt.edp.low_vswing) {
 		if (voltage == VOLTAGE_INFO_0_85V) {
 			*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_85V);
-			return cnl_ddi_translations_dp_0_85V;
+			return cnl_ddi_translations_edp_0_85V;
 		} else if (voltage == VOLTAGE_INFO_0_95V) {
 			*n_entries = ARRAY_SIZE(cnl_ddi_translations_edp_0_95V);
 			return cnl_ddi_translations_edp_0_95V;
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -3485,6 +3485,13 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
 	    !gpu_reset_clobbers_display(dev_priv))
 		return;
 
+	/* We have a modeset vs reset deadlock, defensively unbreak it.
+	 *
+	 * FIXME: We can do a _lot_ better, this is just a first iteration.
+	 */
+	i915_gem_set_wedged(dev_priv);
+	DRM_DEBUG_DRIVER("Wedging GPU to avoid deadlocks with pending modeset updates\n");
+
 	/*
 	 * Need mode_config.mutex so that we don't
 	 * trample ongoing ->detect() and whatnot.
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -63,7 +63,6 @@ enum {
 };
 
 /* Logical Rings */
-void intel_logical_ring_stop(struct intel_engine_cs *engine);
 void intel_logical_ring_cleanup(struct intel_engine_cs *engine);
 int logical_render_ring_init(struct intel_engine_cs *engine);
 int logical_xcs_ring_init(struct intel_engine_cs *engine);