Commit 6ad790c0 authored by Chris Wilson, committed by Daniel Vetter

drm/i915: Boost GPU frequency if we detect outstanding pageflips

If we hit a vblank and see that we have a pageflip queued but not yet
processed, ensure that the GPU is running at maximum in order to clear
the backlog. Pageflips are only queued for the following vblank; if we
miss it, there will be a visible stutter. Boosting the GPU frequency
doesn't prevent us from missing the target vblank, but it should help
the subsequent frames hit theirs.

v2: Reorder vblank vs flip-complete so that we only check for a missed
flip after processing the completion events, and avoid spurious boosts.

v3: Rename missed_vblank
v4: Rebase
v5: Cancel the outstanding work in runtime suspend
v6: Rebase
v7: Rebase required fixing
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Deepak S <deepak.s@linux.intel.com>
Reviewed-by: Deepak S <deepak.s@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent edcf284b
@@ -10180,6 +10180,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+	struct intel_unpin_work *work;
 
 	WARN_ON(!in_interrupt());
 
@@ -10187,12 +10188,16 @@ void intel_check_page_flip(struct drm_device *dev, int pipe)
 		return;
 
 	spin_lock(&dev->event_lock);
-	if (intel_crtc->unpin_work && __intel_pageflip_stall_check(dev, crtc)) {
+	work = intel_crtc->unpin_work;
+	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
 		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
-			 intel_crtc->unpin_work->flip_queued_vblank,
-			 drm_vblank_count(dev, pipe));
+			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
 		page_flip_completed(intel_crtc);
+		work = NULL;
 	}
+	if (work != NULL &&
+	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
+		intel_queue_rps_boost_for_request(dev, work->flip_queued_req);
 	spin_unlock(&dev->event_lock);
 }
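The hunk above restructures intel_check_page_flip() around a local work pointer so the flip state is sampled once under dev->event_lock. The decision it implements can be summarised in a short standalone sketch; every type and helper name below (pending_flip, crtc_state, flip_is_stuck, complete_flip, queue_boost) is a hypothetical stand-in for the driver's real ones, not i915 API:

#include <stddef.h>

/* Hypothetical, simplified stand-ins for the driver's real types. */
struct pending_flip {
	unsigned int queued_vblank;	/* vblank count when the flip was queued */
	void *request;			/* GPU request the flip depends on */
};

struct crtc_state {
	struct pending_flip *pending_flip;
};

/* Hypothetical helpers standing in for __intel_pageflip_stall_check(),
 * page_flip_completed() and intel_queue_rps_boost_for_request(). */
extern int flip_is_stuck(const struct crtc_state *crtc);
extern void complete_flip(struct crtc_state *crtc);
extern void queue_boost(void *request);

/* The decision intel_check_page_flip() makes under dev->event_lock. */
void check_page_flip_sketch(struct crtc_state *crtc, unsigned int vblank_now)
{
	struct pending_flip *work = crtc->pending_flip;

	/* A flip stuck past the stall threshold is force-completed... */
	if (work != NULL && flip_is_stuck(crtc)) {
		complete_flip(crtc);
		work = NULL;	/* ...and no longer needs a boost */
	}

	/*
	 * A flip still pending more than one vblank after it was queued has
	 * already missed its target frame; boost the GPU so the frames that
	 * follow hit theirs.
	 */
	if (work != NULL && vblank_now - work->queued_vblank > 1)
		queue_boost(work->request);
}

Note that the vblank comparison uses unsigned subtraction, so it remains correct across wraparound of the vblank counter.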
@@ -1263,6 +1263,8 @@ void gen6_rps_busy(struct drm_i915_private *dev_priv);
 void gen6_rps_reset_ei(struct drm_i915_private *dev_priv);
 void gen6_rps_idle(struct drm_i915_private *dev_priv);
 void gen6_rps_boost(struct drm_i915_private *dev_priv);
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+				       struct drm_i915_gem_request *rq);
 void ilk_wm_get_hw_state(struct drm_device *dev);
 void skl_wm_get_hw_state(struct drm_device *dev);
 void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
@@ -6772,6 +6772,41 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 	return val / GT_FREQUENCY_MULTIPLIER;
 }
 
+struct request_boost {
+	struct work_struct work;
+	struct drm_i915_gem_request *rq;
+};
+
+static void __intel_rps_boost_work(struct work_struct *work)
+{
+	struct request_boost *boost = container_of(work, struct request_boost, work);
+
+	if (!i915_gem_request_completed(boost->rq, true))
+		gen6_rps_boost(to_i915(boost->rq->ring->dev));
+
+	i915_gem_request_unreference__unlocked(boost->rq);
+	kfree(boost);
+}
+
+void intel_queue_rps_boost_for_request(struct drm_device *dev,
+				       struct drm_i915_gem_request *rq)
+{
+	struct request_boost *boost;
+
+	if (rq == NULL || INTEL_INFO(dev)->gen < 6)
+		return;
+
+	boost = kmalloc(sizeof(*boost), GFP_ATOMIC);
+	if (boost == NULL)
+		return;
+
+	i915_gem_request_reference(rq);
+	boost->rq = rq;
+
+	INIT_WORK(&boost->work, __intel_rps_boost_work);
+	queue_work(to_i915(dev)->wq, &boost->work);
+}
+
 void intel_pm_setup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
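intel_check_page_flip() runs in interrupt context (hence the WARN_ON(!in_interrupt()) earlier), where the code may neither sleep nor take mutexes, so the boost itself is packaged into a work item: the wrapper is allocated with GFP_ATOMIC, a reference is taken on the request so it stays alive, and the actual frequency change happens later from process context where blocking is allowed; the worker then drops the reference and frees the wrapper. A minimal userspace analogue of this defer-with-a-reference pattern, using a pthread in place of the kernel workqueue (all names below are illustrative, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative refcounted "request", standing in for drm_i915_gem_request. */
struct request {
	atomic_int refcount;
	int completed;
};

static void request_get(struct request *rq)
{
	atomic_fetch_add(&rq->refcount, 1);
}

static void request_put(struct request *rq)
{
	/* Free once the last reference is dropped. */
	if (atomic_fetch_sub(&rq->refcount, 1) == 1)
		free(rq);
}

struct boost_work {
	struct request *rq;
};

/* Deferred side: runs where blocking is allowed,
 * mirroring __intel_rps_boost_work(). */
static void *boost_worker(void *arg)
{
	struct boost_work *boost = arg;

	if (!boost->rq->completed)
		printf("boosting GPU frequency for outstanding request\n");

	request_put(boost->rq);	/* drop the reference taken at queue time */
	free(boost);
	return NULL;
}

/* "Atomic" side: only allocates, takes a reference and queues,
 * mirroring intel_queue_rps_boost_for_request(). */
static int queue_boost_for_request(struct request *rq, pthread_t *worker)
{
	struct boost_work *boost;

	if (rq == NULL)
		return -1;

	boost = malloc(sizeof(*boost));	/* the kernel version uses GFP_ATOMIC */
	if (boost == NULL)
		return -1;

	request_get(rq);	/* keep rq alive until the worker has run */
	boost->rq = rq;

	if (pthread_create(worker, NULL, boost_worker, boost) != 0) {
		request_put(rq);
		free(boost);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct request *rq = calloc(1, sizeof(*rq));
	pthread_t worker;

	if (rq == NULL)
		return 1;
	atomic_init(&rq->refcount, 1);	/* submitter's own reference */

	if (queue_boost_for_request(rq, &worker) == 0)
		pthread_join(worker, NULL);

	request_put(rq);	/* submitter drops its reference */
	return 0;
}

The key invariant in both the kernel code and this sketch is that the reference taken at queue time guarantees the request outlives the deferred work, even if the submitter drops its own reference immediately after queueing.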