Commit c2776afe authored by Christian König, committed by Alex Deucher

drm/amdgpu: use a timer for fence fallback

This has less overhead than a work item, and it also adds proper cleanup handling.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
parent 935c186a
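
Editor's note: the pattern adopted below is the classic kernel fallback timer — arm a one-shot timer while fences are outstanding, re-arm it from the handler as long as work remains, and cancel it synchronously at teardown. A minimal sketch of that lifecycle, using the same pre-4.15 timer API the patch uses (setup_timer() smuggles the ring through an unsigned long cookie); this is not amdgpu code — the my_ring type and my_ring_busy() helper are hypothetical:

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_ring {
	struct timer_list fallback_timer;
	/* ... fence bookkeeping ... */
};

/* Hypothetical helper: true while emitted fences have not yet signaled. */
static bool my_ring_busy(struct my_ring *ring);

static void my_fallback(unsigned long arg)
{
	struct my_ring *ring = (void *)arg;	/* cookie from setup_timer() */

	/* Re-arm only while fences are outstanding; mod_timer() on a
	 * one-shot timer replaces a self-requeueing delayed work item. */
	if (my_ring_busy(ring))
		mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
}

static void my_ring_init(struct my_ring *ring)
{
	setup_timer(&ring->fallback_timer, my_fallback, (unsigned long)ring);
}

static void my_ring_fini(struct my_ring *ring)
{
	del_timer_sync(&ring->fallback_timer);	/* waits out a running handler */
}

The del_timer_sync() at teardown is the "proper cleanup handling" the commit message refers to: the old delayed work item was never cancelled in amdgpu_fence_driver_fini(), whereas the patch guarantees the timer is dead before the fence driver goes away.
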
@@ -389,7 +389,6 @@ struct amdgpu_clock {
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@ struct amdgpu_fence_driver {
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work		lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
@@ -84,24 +84,6 @@ static u32 amdgpu_fence_read(struct amdgpu_ring *ring)
 	return seq;
 }

-/**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-	/*
-	 * Do not reset the timer here with mod_delayed_work,
-	 * this can livelock in an interaction with TTM delayed destroy.
-	 */
-	queue_delayed_work(system_power_efficient_wq,
-			   &ring->fence_drv.lockup_work,
-			   AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
 /**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
@@ -135,6 +117,19 @@ int amdgpu_fence_emit(struct amdgpu_ring *ring, void *owner,
 	return 0;
 }

+/**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
 /**
  * amdgpu_fence_activity - check for fence activity
  *
@@ -201,45 +196,38 @@ static bool amdgpu_fence_activity(struct amdgpu_ring *ring)
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);

 	if (seq < last_emitted)
-		amdgpu_fence_schedule_check(ring);
+		amdgpu_fence_schedule_fallback(ring);

 	return wake;
 }

 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
+ * amdgpu_fence_process - process a fence
  *
- * @work: delayed work item
+ * @adev: amdgpu_device pointer
+ * @ring: ring index the fence is associated with
  *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
+ * Checks the current fence value and wakes the fence queue
+ * if the sequence number has increased (all asics).
  */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
+void amdgpu_fence_process(struct amdgpu_ring *ring)
 {
-	struct amdgpu_fence_driver *fence_drv;
-	struct amdgpu_ring *ring;
-
-	fence_drv = container_of(work, struct amdgpu_fence_driver,
-				lockup_work.work);
-	ring = fence_drv->ring;
-
 	if (amdgpu_fence_activity(ring))
 		wake_up_all(&ring->fence_drv.fence_queue);
 }

 /**
- * amdgpu_fence_process - process a fence
+ * amdgpu_fence_fallback - fallback for hardware interrupts
  *
- * @adev: amdgpu_device pointer
- * @ring: ring index the fence is associated with
+ * @work: delayed work item
  *
- * Checks the current fence value and wakes the fence queue
- * if the sequence number has increased (all asics).
+ * Checks for fence activity.
  */
-void amdgpu_fence_process(struct amdgpu_ring *ring)
+static void amdgpu_fence_fallback(unsigned long arg)
 {
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
+	struct amdgpu_ring *ring = (void *)arg;
+
+	amdgpu_fence_process(ring);
 }

 /**
@@ -289,7 +277,7 @@ static int amdgpu_fence_ring_wait_seq(struct amdgpu_ring *ring, uint64_t seq)
 	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
 		return 0;

-	amdgpu_fence_schedule_check(ring);
+	amdgpu_fence_schedule_fallback(ring);
 	wait_event(ring->fence_drv.fence_queue, (
 		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));

@@ -490,9 +478,8 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring)
 	atomic64_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;

-	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-			  amdgpu_fence_check_lockup);
-	ring->fence_drv.ring = ring;
+	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+		    (unsigned long)ring);

 	init_waitqueue_head(&ring->fence_drv.fence_queue);

@@ -556,6 +543,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 		r = amdgpu_fence_wait_empty(ring);
@@ -567,6 +555,7 @@ void amdgpu_fence_driver_fini(struct amdgpu_device *adev)
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
@@ -750,7 +739,8 @@ static bool amdgpu_fence_enable_signaling(struct fence *f)
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
 	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
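
Editor's note for anyone porting this pattern to a current kernel: the setup_timer() macro and the unsigned long callback argument used here were deprecated in the 4.14/4.15 timeframe and have since been removed, in favor of timer_setup() plus from_timer(), which recovers the containing structure from the timer_list pointer instead of casting a cookie. A sketch of the same fallback under those APIs (same hypothetical my_ring type and my_ring_busy() helper as above):

#include <linux/jiffies.h>
#include <linux/timer.h>

struct my_ring {
	struct timer_list fallback_timer;
};

/* Hypothetical helper: true while emitted fences have not yet signaled. */
static bool my_ring_busy(struct my_ring *ring);

static void my_fallback(struct timer_list *t)
{
	/* from_timer() maps the timer_list member back to its container */
	struct my_ring *ring = from_timer(ring, t, fallback_timer);

	if (my_ring_busy(ring))
		mod_timer(&ring->fallback_timer, jiffies + HZ / 2);
}

static void my_ring_init(struct my_ring *ring)
{
	timer_setup(&ring->fallback_timer, my_fallback, 0);
}

The timer_pending() guard added in amdgpu_fence_enable_signaling() above carries over unchanged: it avoids pushing back an already-armed one-shot timer every time a fence is armed.
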