Commit 9c5b2b0d authored by Harry Wentland, committed by Alex Deucher

drm/amdgpu: Pulling old prepare and submit for flip back

This is needed to ensure every single DC commit builds. It will be
reverted again once DC no longer needs it.

This reverts commit 98da65d5.
Signed-off-by: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6f87a895
@@ -138,11 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags, uint32_t target,
-				 struct drm_modeset_acquire_ctx *ctx)
+static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
+{
+	int i;
+
+	amdgpu_bo_unref(&work->old_abo);
+	dma_fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		dma_fence_put(work->shared[i]);
+	kfree(work->shared);
+	kfree(work);
+}
+
+static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
+					  struct amdgpu_bo *new_abo)
+{
+	amdgpu_bo_unreserve(new_abo);
+	amdgpu_flip_work_cleanup(work);
+}
+
+static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
+				      struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+		DRM_ERROR("failed to unpin new abo in error path\n");
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+}
+
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+				  struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_reserve(new_abo, true) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
+		amdgpu_flip_work_cleanup(work);
+		return;
+	}
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+}
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct drm_pending_vblank_event *event,
+			     uint32_t page_flip_flags,
+			     uint32_t target,
+			     struct amdgpu_flip_work **work_p,
+			     struct amdgpu_bo **new_abo_p)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -155,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int i, r;
+	int r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -216,41 +257,80 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
 		goto pflip_cleanup;
 	}
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	*work_p = work;
+	*new_abo_p = new_abo;
+
+	return 0;
+
+pflip_cleanup:
+	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
+	return r;
+
+unpin:
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+	return r;
+
+unreserve:
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+	return r;
+
+cleanup:
+	amdgpu_flip_work_cleanup(work);
+	return r;
+}
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct amdgpu_flip_work *work,
+			     struct amdgpu_bo *new_abo)
+{
+	unsigned long flags;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
 	amdgpu_crtc->pflip_works = work;
-	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
-					 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER(
+		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+		amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 	amdgpu_flip_work_func(&work->flip_work.work);
-	return 0;
+}
 
-pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
-		DRM_ERROR("failed to reserve new abo in error path\n");
-		goto cleanup;
-	}
-unpin:
-	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-		DRM_ERROR("failed to unpin new abo in error path\n");
-	}
-unreserve:
-	amdgpu_bo_unreserve(new_abo);
-
-cleanup:
-	amdgpu_bo_unref(&work->old_abo);
-	dma_fence_put(work->excl);
-	for (i = 0; i < work->shared_count; ++i)
-		dma_fence_put(work->shared[i]);
-	kfree(work->shared);
-	kfree(work);
-
-	return r;
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags,
+				 uint32_t target,
+				 struct drm_modeset_acquire_ctx *ctx)
+{
+	struct amdgpu_bo *new_abo;
+	struct amdgpu_flip_work *work;
+	int r;
+
+	r = amdgpu_crtc_prepare_flip(crtc,
+				     fb,
+				     event,
+				     page_flip_flags,
+				     target,
+				     &work,
+				     &new_abo);
+	if (r)
+		return r;
+
+	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+
+	return 0;
 }
 
 int amdgpu_crtc_set_config(struct drm_mode_set *set,
...
@@ -587,6 +587,21 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 				 struct drm_pending_vblank_event *event,
 				 uint32_t page_flip_flags, uint32_t target,
 				 struct drm_modeset_acquire_ctx *ctx);
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+				  struct amdgpu_bo *new_abo);
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct drm_pending_vblank_event *event,
+			     uint32_t page_flip_flags,
+			     uint32_t target,
+			     struct amdgpu_flip_work **work,
+			     struct amdgpu_bo **new_abo);
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct amdgpu_flip_work *work,
+			     struct amdgpu_bo *new_abo);
 
 extern const struct drm_mode_config_funcs amdgpu_mode_funcs;
 
 #endif
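
Note (not part of the commit): with these prototypes exported again, a caller such as the DC display stack can drive a page flip in two explicit stages instead of going through amdgpu_crtc_page_flip_target(). Below is a minimal caller sketch under that assumption; the wrapping function example_flip(), its parameters, and the target value of 0 are hypothetical and only illustrate the calling sequence, mirroring the wrapper added in this diff.

	/* Hypothetical caller sketch: crtc, fb and event are assumed to come
	 * from the caller's flip request; only the amdgpu_crtc_* helpers are
	 * from this commit. */
	static int example_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
	{
		struct amdgpu_flip_work *work;
		struct amdgpu_bo *new_abo;
		int r;

		/* Stage 1: pin the new buffer and build the flip work item. */
		r = amdgpu_crtc_prepare_flip(crtc, fb, event, page_flip_flags,
					     0 /* caller-chosen target vblank */,
					     &work, &new_abo);
		if (r)
			return r;

		/* The caller can program its own display state here, before
		 * the flip is actually queued. */

		/* Stage 2: mark the CRTC flip pending and kick the flip worker. */
		amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
		return 0;
	}

If prepare succeeds but the caller decides not to submit, amdgpu_crtc_cleanup_flip_ctx(work, new_abo) unpins and releases everything that the prepare step acquired.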