Commit e2183fb1 authored by Maarten Lankhorst

Revert "drm/scheduler: Job timeout handler returns status (v3)"

This reverts commit c10983e1.

This commit is not meant for drm-misc-next-fixes, and was accidentally
cherry picked over.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
parent 4b8878ee
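
For orientation: the reverted commit changed the timedout_job hook in struct drm_sched_backend_ops so that drivers report a status (enum drm_gpu_sched_stat) back to the scheduler core instead of returning void; this revert restores the void prototype, and every driver hunk below drops its "return DRM_GPU_SCHED_STAT_NOMINAL;" accordingly. The following is a rough, standalone sketch of the restored callback shape -- plain userspace C with stand-in types, not kernel code and not part of this commit:

/*
 * Illustration only -- a tiny userspace model, not kernel code.  The struct
 * and field names imitate the kernel ones purely for readability.
 */
#include <stdio.h>

struct drm_sched_job_model {                 /* stand-in for struct drm_sched_job */
        const char *ring_name;
};

struct drm_sched_backend_ops_model {         /* stand-in for drm_sched_backend_ops */
        /* post-revert shape: no status is returned to the caller */
        void (*timedout_job)(struct drm_sched_job_model *sched_job);
};

static void demo_timedout_job(struct drm_sched_job_model *sched_job)
{
        /* a real driver would stop the scheduler, reset the GPU and restart it */
        printf("ring %s: timeout handled\n", sched_job->ring_name);
}

static const struct drm_sched_backend_ops_model demo_ops = {
        .timedout_job = demo_timedout_job,
};

int main(void)
{
        struct drm_sched_job_model job = { .ring_name = "gfx" };

        /* loosely models the core invoking the driver's timeout hook */
        demo_ops.timedout_job(&job);
        return 0;
}

The stand-in ops table only models the dispatch; the job contents and the actual recovery work are deliberately stubbed out.
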
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,7 +28,7 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
+static void amdgpu_job_timedout(struct drm_sched_job *s_job)
 {
         struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
         struct amdgpu_job *job = to_amdgpu_job(s_job);
@@ -41,7 +41,7 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
             amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
                 DRM_ERROR("ring %s timeout, but soft recovered\n",
                           s_job->sched->name);
-                return DRM_GPU_SCHED_STAT_NOMINAL;
+                return;
         }
 
         amdgpu_vm_get_task_info(ring->adev, job->pasid, &ti);
@@ -53,12 +53,10 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
 
         if (amdgpu_device_should_recover_gpu(ring->adev)) {
                 amdgpu_device_gpu_recover(ring->adev, job);
-                return DRM_GPU_SCHED_STAT_NOMINAL;
         } else {
                 drm_sched_suspend_timeout(&ring->sched);
                 if (amdgpu_sriov_vf(adev))
                         adev->virt.tdr_debug = true;
-                return DRM_GPU_SCHED_STAT_NOMINAL;
         }
 }
 
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -82,8 +82,7 @@ static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
         return fence;
 }
 
-static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
-                                                          *sched_job)
+static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
 {
         struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
         struct etnaviv_gpu *gpu = submit->gpu;
@@ -121,13 +120,9 @@ static enum drm_gpu_sched_stat etnaviv_sched_timedout_job(struct drm_sched_job
 
         drm_sched_resubmit_jobs(&gpu->sched);
 
-        drm_sched_start(&gpu->sched, true);
-        return DRM_GPU_SCHED_STAT_NOMINAL;
-
 out_no_timeout:
         /* restart scheduler after GPU is usable again */
         drm_sched_start(&gpu->sched, true);
-        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
 static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -415,7 +415,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task)
         mutex_unlock(&dev->error_task_list_lock);
 }
 
-static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
+static void lima_sched_timedout_job(struct drm_sched_job *job)
 {
         struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
         struct lima_sched_task *task = to_lima_task(job);
@@ -449,8 +449,6 @@ static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job
 
         drm_sched_resubmit_jobs(&pipe->base);
         drm_sched_start(&pipe->base, true);
-
-        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
 static void lima_sched_free_job(struct drm_sched_job *job)
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -432,8 +432,7 @@ static void panfrost_scheduler_start(struct panfrost_queue_state *queue)
         mutex_unlock(&queue->lock);
 }
 
-static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
-                                                     *sched_job)
+static void panfrost_job_timedout(struct drm_sched_job *sched_job)
 {
         struct panfrost_job *job = to_panfrost_job(sched_job);
         struct panfrost_device *pfdev = job->pfdev;
@@ -444,7 +443,7 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
          * spurious. Bail out.
          */
         if (dma_fence_is_signaled(job->done_fence))
-                return DRM_GPU_SCHED_STAT_NOMINAL;
+                return;
 
         dev_err(pfdev->dev, "gpu sched timeout, js=%d, config=0x%x, status=0x%x, head=0x%x, tail=0x%x, sched_job=%p",
                 js,
@@ -456,13 +455,11 @@ static enum drm_gpu_sched_stat panfrost_job_timedout(struct drm_sched_job
 
         /* Scheduler is already stopped, nothing to do. */
         if (!panfrost_scheduler_stop(&pfdev->js->queue[js], sched_job))
-                return DRM_GPU_SCHED_STAT_NOMINAL;
+                return;
 
         /* Schedule a reset if there's no reset in progress. */
         if (!atomic_xchg(&pfdev->reset.pending, 1))
                 schedule_work(&pfdev->reset.work);
-
-        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
 static const struct drm_sched_backend_ops panfrost_sched_ops = {
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -527,7 +527,7 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
 EXPORT_SYMBOL(drm_sched_start);
 
 /**
- * drm_sched_resubmit_jobs - helper to relaunch jobs from the pending list
+ * drm_sched_resubmit_jobs - helper to relunch job from pending ring list
  *
  * @sched: scheduler instance
  *
@@ -561,6 +561,8 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched)
                 } else {
                         s_job->s_fence->parent = fence;
                 }
         }
 }
 EXPORT_SYMBOL(drm_sched_resubmit_jobs);
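
The drm_sched_resubmit_jobs kernel-doc above describes it as a helper that relaunches jobs from the scheduler's pending list, and the surrounding context shows the relaunched hardware fence being stored back as the job's parent. A toy, userspace-only model of that idea follows; all names here are local stand-ins, not the scheduler's real data structures:

/*
 * Toy model (plain C) of the resubmit idea: walk a pending list, call a
 * run_job-style hook again, and remember the new fence as the job's parent.
 * List handling, fence types and error paths are simplified stand-ins.
 */
#include <stdio.h>

struct fence_model { int seqno; };

struct job_model {
        struct job_model *next;        /* pending-list link (stand-in) */
        struct fence_model *parent;    /* fence of the last submission */
};

/* stand-in for a driver's run_job hook: pushes the job to hardware again */
static struct fence_model *run_job_model(struct job_model *job)
{
        static struct fence_model f;

        (void)job;
        f.seqno++;
        return &f;
}

static void resubmit_jobs_model(struct job_model *pending)
{
        for (struct job_model *job = pending; job; job = job->next) {
                struct fence_model *fence = run_job_model(job);

                job->parent = fence;   /* mirrors s_job->s_fence->parent = fence */
        }
}

int main(void)
{
        struct job_model j2 = { .next = NULL };
        struct job_model j1 = { .next = &j2 };

        resubmit_jobs_model(&j1);
        printf("resubmitted; last parent seqno = %d\n", j2.parent->seqno);
        return 0;
}

In the real helper the jobs come from the scheduler's own pending list and run_job is the driver callback from drm_sched_backend_ops; the model only keeps the resubmit-and-reparent loop.
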
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -259,7 +259,7 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job)
         return NULL;
 }
 
-static enum drm_gpu_sched_status
+static void
 v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
 {
         enum v3d_queue q;
@@ -285,8 +285,6 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
         }
 
         mutex_unlock(&v3d->reset_lock);
-
-        return DRM_GPU_SCHED_STAT_NOMINAL;
 }
 
 /* If the current address or return address have changed, then the GPU
@@ -294,7 +292,7 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job)
  * could fail if the GPU got in an infinite loop in the CL, but that
  * is pretty unlikely outside of an i-g-t testcase.
  */
-static enum drm_task_status
+static void
 v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
                     u32 *timedout_ctca, u32 *timedout_ctra)
 {
@@ -306,39 +304,39 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q,
         if (*timedout_ctca != ctca || *timedout_ctra != ctra) {
                 *timedout_ctca = ctca;
                 *timedout_ctra = ctra;
-                return DRM_GPU_SCHED_STAT_NOMINAL;
+                return;
         }
 
-        return v3d_gpu_reset_for_timeout(v3d, sched_job);
+        v3d_gpu_reset_for_timeout(v3d, sched_job);
 }
 
-static enum drm_task_status
+static void
 v3d_bin_job_timedout(struct drm_sched_job *sched_job)
 {
         struct v3d_bin_job *job = to_bin_job(sched_job);
 
-        return v3d_cl_job_timedout(sched_job, V3D_BIN,
+        v3d_cl_job_timedout(sched_job, V3D_BIN,
                             &job->timedout_ctca, &job->timedout_ctra);
 }
 
-static enum drm_task_status
+static void
 v3d_render_job_timedout(struct drm_sched_job *sched_job)
 {
         struct v3d_render_job *job = to_render_job(sched_job);
 
-        return v3d_cl_job_timedout(sched_job, V3D_RENDER,
+        v3d_cl_job_timedout(sched_job, V3D_RENDER,
                             &job->timedout_ctca, &job->timedout_ctra);
 }
 
-static enum drm_task_status
+static void
 v3d_generic_job_timedout(struct drm_sched_job *sched_job)
 {
         struct v3d_job *job = to_v3d_job(sched_job);
 
-        return v3d_gpu_reset_for_timeout(job->v3d, sched_job);
+        v3d_gpu_reset_for_timeout(job->v3d, sched_job);
 }
 
-static enum drm_task_status
+static void
 v3d_csd_job_timedout(struct drm_sched_job *sched_job)
 {
         struct v3d_csd_job *job = to_csd_job(sched_job);
@@ -350,10 +348,10 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job)
          */
         if (job->timedout_batches != batches) {
                 job->timedout_batches = batches;
-                return DRM_GPU_SCHED_STAT_NOMINAL;
+                return;
         }
 
-        return v3d_gpu_reset_for_timeout(v3d, sched_job);
+        v3d_gpu_reset_for_timeout(v3d, sched_job);
 }
 
 static const struct drm_sched_backend_ops v3d_bin_sched_ops = {
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -206,12 +206,6 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
         return s_job && atomic_inc_return(&s_job->karma) > threshold;
 }
 
-enum drm_gpu_sched_stat {
-        DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
-        DRM_GPU_SCHED_STAT_NOMINAL,
-        DRM_GPU_SCHED_STAT_ENODEV,
-};
-
 /**
  * struct drm_sched_backend_ops
  *
@@ -236,16 +230,10 @@ struct drm_sched_backend_ops {
         struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
 
         /**
          * @timedout_job: Called when a job has taken too long to execute,
          * to trigger GPU recovery.
-         *
-         * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
-         * and the underlying driver has started or completed recovery.
-         *
-         * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
-         * available, i.e. has been unplugged.
          */
-        enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
+        void (*timedout_job)(struct drm_sched_job *sched_job);
 
         /**
          * @free_job: Called once the job's finished fence has been signaled
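
The kernel-doc text removed in the hunk above explains what the status return conveyed: DRM_GPU_SCHED_STAT_NOMINAL when the driver has started or completed recovery, and DRM_GPU_SCHED_STAT_ENODEV when the device is no longer available, i.e. has been unplugged. A toy, userspace-only model of that distinction, which the void prototype restored by this revert can no longer express (all names are local stand-ins):

/*
 * Toy model only -- plain userspace C, not the scheduler's internals.  The
 * enum mirrors the removed drm_gpu_sched_stat values named in the hunk above.
 */
#include <stdbool.h>
#include <stdio.h>

enum sched_stat_model {
        SCHED_STAT_NONE,        /* reserve 0, as in the removed enum */
        SCHED_STAT_NOMINAL,     /* recovery started or completed */
        SCHED_STAT_ENODEV,      /* device no longer available, e.g. unplugged */
};

/* A status-returning handler lets the caller tell these cases apart. */
static enum sched_stat_model handle_timeout_with_status(bool device_present)
{
        if (!device_present)
                return SCHED_STAT_ENODEV;
        /* ... trigger recovery here ... */
        return SCHED_STAT_NOMINAL;
}

int main(void)
{
        if (handle_timeout_with_status(false) == SCHED_STAT_ENODEV)
                printf("device gone: caller can skip further timeout handling\n");
        return 0;
}

With the void prototype, the caller has no such signal and has to assume the nominal case.
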