Commit fdd1fe57 authored by Mario Kleiner, committed by Alex Deucher

drm/amd/display: Rework vrr flip throttling for late vblank irq.

For throttling to work correctly, we always need a baseline vblank
count last_flip_vblank that increments at start of front-porch.

This is the case for drm_crtc_vblank_count() in non-VRR mode, where
the vblank irq fires at start of front-porch and triggers DRM core
vblank handling, but it is no longer the case in VRR mode, where
core vblank handling is done later, after end of front-porch.

Therefore drm_crtc_vblank_count() is no longer useful for this.
We also can't use drm_crtc_accurate_vblank_count(), as that would
screw up vblank timestamps in VRR mode when called in front-porch.

To solve this, use the cooked hardware vblank counter returned by
amdgpu_get_vblank_counter_kms() instead, as that one is cooked to
always increment at start of front-porch, independent of when
vblank-related irqs fire.

This patch allows vblank irq handling to happen anywhere within
vblank or even after it, without a negative impact on flip
throttling, so followup patches can shift the core vblank
handling trigger point wherever they need it.
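
For illustration, the throttling scheme can be modelled with a small
self-contained C program. This is only a sketch of the arithmetic described
above, not driver code: hw_vblank_counter stands in for the cooked counter
returned by amdgpu_get_vblank_counter_kms(), and on_flip_completion() /
may_queue_flip() are hypothetical helpers used only for this model.

#include <stdint.h>
#include <stdio.h>

/* Model of the cooked hw vblank counter: increments at start of front-porch. */
static uint32_t hw_vblank_counter;

/* Baseline sampled in the pageflip completion irq. */
static uint32_t last_flip_vblank;

static void on_flip_completion(void)
{
        last_flip_vblank = hw_vblank_counter;
}

/* A new flip may be queued once the counter has reached
 * last_flip_vblank + wait_for_vblank.
 */
static int may_queue_flip(uint32_t wait_for_vblank)
{
        uint32_t target_vblank = last_flip_vblank + wait_for_vblank;

        /* Signed difference keeps the comparison correct across u32 wraparound. */
        return (int32_t)(hw_vblank_counter - target_vblank) >= 0;
}

int main(void)
{
        hw_vblank_counter = UINT32_MAX;    /* counter about to wrap */
        on_flip_completion();

        printf("%d\n", may_queue_flip(1)); /* 0: still in the vblank of the last flip */
        hw_vblank_counter++;               /* wraps to 0 at the next front-porch */
        printf("%d\n", may_queue_flip(1)); /* 1: one full vblank has passed */
        return 0;
}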
Signed-off-by: Mario Kleiner <mario.kleiner.de@gmail.com>
Reviewed-by: Nicholas Kazlauskas <Nicholas.Kazlauskas@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 66b0c973
@@ -406,7 +406,7 @@ struct amdgpu_crtc {
 	struct amdgpu_flip_work *pflip_works;
 	enum amdgpu_flip_status pflip_status;
 	int deferred_flip_completion;
-	u64 last_flip_vblank;
+	u32 last_flip_vblank;
 	/* pll sharing */
 	struct amdgpu_atom_ss ss;
 	bool ss_enabled;
...
@@ -286,7 +286,7 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	}
 
 	/* Update to correct count(s) if racing with vblank irq */
-	amdgpu_crtc->last_flip_vblank = drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
+	drm_crtc_accurate_vblank_count(&amdgpu_crtc->base);
 
 	/* wake up userspace */
 	if (amdgpu_crtc->event) {
@@ -298,6 +298,14 @@ static void dm_pflip_high_irq(void *interrupt_params)
 	} else
 		WARN_ON(1);
 
+	/* Keep track of vblank of this flip for flip throttling. We use the
+	 * cooked hw counter, as that one incremented at start of this vblank
+	 * of pageflip completion, so last_flip_vblank is the forbidden count
+	 * for queueing new pageflips if vsync + VRR is enabled.
+	 */
+	amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
+							amdgpu_crtc->crtc_id);
+
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -4790,11 +4798,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 	unsigned long flags;
 	struct amdgpu_bo *abo;
 	uint64_t tiling_flags;
-	uint32_t target, target_vblank;
-	uint64_t last_flip_vblank;
-	bool vrr_active = acrtc_state->freesync_config.state ==
-			  VRR_STATE_ACTIVE_VARIABLE;
+	uint32_t target_vblank, last_flip_vblank;
+	bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
 	bool pflip_present = false;
 	struct {
 		struct dc_surface_update surface_updates[MAX_SURFACES];
 		struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -4936,7 +4942,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			 * clients using the GLX_OML_sync_control extension or
 			 * DRI3/Present extension with defined target_msc.
 			 */
-			last_flip_vblank = drm_crtc_vblank_count(pcrtc);
+			last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
 		}
 		else {
 			/* For variable refresh rate mode only:
@@ -4952,11 +4958,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 			spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags);
 		}
 
-		target = (uint32_t)last_flip_vblank + wait_for_vblank;
-
-		/* Prepare wait for target vblank early - before the fence-waits */
-		target_vblank = target - (uint32_t)drm_crtc_vblank_count(pcrtc) +
-				amdgpu_get_vblank_counter_kms(pcrtc->dev, acrtc_attach->crtc_id);
+		target_vblank = last_flip_vblank + wait_for_vblank;
 
 		/*
 		 * Wait until we're out of the vertical blank period before the one
...
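
As a companion sketch, the wait on target_vblank can be modelled in plain C
with a wrap-safe signed comparison, which is also why a u32 counter is
sufficient after this change. read_vblank_counter() and the millisecond
polling are assumptions of this model, not the driver's implementation.

#include <stdint.h>
#include <unistd.h>

/* Placeholder for the cooked hw vblank counter read; in this model it simply
 * advances once per poll so the example terminates.
 */
static uint32_t fake_counter = 0xfffffffeu;	/* about to wrap */

static uint32_t read_vblank_counter(void)
{
        return fake_counter++;
}

static void wait_for_target_vblank(uint32_t target_vblank)
{
        /* (int32_t)(a - b) > 0 means "a is still ahead of b", even across wraps. */
        while ((int32_t)(target_vblank - read_vblank_counter()) > 0)
                usleep(1000);   /* poll roughly once per millisecond */
}

int main(void)
{
        /* last_flip_vblank + wait_for_vblank may wrap; the wait still works. */
        wait_for_target_vblank(0xfffffffeu + 3u);
        return 0;
}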