Commit 16a7133f authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix coding style in the scheduler v2

v2: fix even more
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Monk.Liu <monk.liu@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3cc25911
@@ -320,7 +320,9 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 }
 
 static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) {
-        struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job);
+        struct amd_sched_job *job = container_of(cb, struct amd_sched_job,
+                                                 cb_free_job);
+
         schedule_work(&job->work_free_job);
 }
 
@@ -341,7 +343,8 @@ void amd_sched_job_finish(struct amd_sched_job *s_job)
                                                 struct amd_sched_job, node);
 
                 if (next) {
-                        INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback);
+                        INIT_DELAYED_WORK(&next->work_tdr,
+                                          s_job->timeout_callback);
                         amd_sched_job_get(next);
                         schedule_delayed_work(&next->work_tdr, sched->timeout);
                 }
@@ -353,7 +356,8 @@ void amd_sched_job_begin(struct amd_sched_job *s_job)
         struct amd_gpu_scheduler *sched = s_job->sched;
 
         if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
-                list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job)
+            list_first_entry_or_null(&sched->ring_mirror_list,
+                                     struct amd_sched_job, node) == s_job)
         {
                 INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback);
                 amd_sched_job_get(s_job);
@@ -504,7 +508,8 @@ static int amd_sched_main(void *param)
                         if (r == -ENOENT)
                                 amd_sched_process_job(fence, &s_fence->cb);
                         else if (r)
-                                DRM_ERROR("fence add callback failed (%d)\n", r);
+                                DRM_ERROR("fence add callback failed (%d)\n",
+                                          r);
                         fence_put(fence);
                 } else {
                         DRM_ERROR("Failed to run job!\n");
...
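For reference, the gpu_scheduler.c hunks above only re-wrap overlong argument lists; the behaviour is unchanged. The free path they touch defers the actual job destruction to a workqueue, because the fence callback typically runs in atomic context. Below is a minimal sketch of that shape; the my_job names are hypothetical, and only work_struct, INIT_WORK(), schedule_work(), container_of(), kzalloc() and kfree() are real kernel APIs.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_job {
        struct work_struct work_free;   /* queued instead of freeing inline */
        /* ... job payload ... */
};

/* Runs later in process context, where it is safe to free the job. */
static void my_job_free_worker(struct work_struct *work)
{
        struct my_job *job = container_of(work, struct my_job, work_free);

        kfree(job);
}

static struct my_job *my_job_create(void)
{
        struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);

        if (job)
                INIT_WORK(&job->work_free, my_job_free_worker);
        return job;
}

/* Safe to call from a fence callback: it only queues the work item. */
static void my_job_release(struct my_job *job)
{
        schedule_work(&job->work_free);
}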
@@ -94,7 +94,8 @@ struct amd_sched_job {
 extern const struct fence_ops amd_sched_fence_ops;
 static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
 {
-        struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base);
+        struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence,
+                                                   base);
 
         if (__f->base.ops == &amd_sched_fence_ops)
                 return __f;
@@ -163,12 +164,14 @@ void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched ,
                                 struct amd_sched_job *s_job);
 void amd_sched_job_finish(struct amd_sched_job *s_job);
 void amd_sched_job_begin(struct amd_sched_job *s_job);
-static inline void amd_sched_job_get(struct amd_sched_job *job) {
+static inline void amd_sched_job_get(struct amd_sched_job *job)
+{
         if (job)
                 kref_get(&job->refcount);
 }
 
-static inline void amd_sched_job_put(struct amd_sched_job *job) {
+static inline void amd_sched_job_put(struct amd_sched_job *job)
+{
         if (job)
                 kref_put(&job->refcount, job->free_callback);
 }
...
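The amd_sched_job_get()/amd_sched_job_put() helpers whose braces are moved above are thin wrappers around the kernel's kref reference counting. A minimal sketch of that pattern follows, with hypothetical my_object names; kref_init(), kref_get(), kref_put() and container_of() are the real APIs.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_object {
        struct kref refcount;
        /* ... payload ... */
};

/* Invoked by kref_put() once the last reference is dropped. */
static void my_object_release(struct kref *ref)
{
        struct my_object *obj = container_of(ref, struct my_object, refcount);

        kfree(obj);
}

static struct my_object *my_object_create(void)
{
        struct my_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                kref_init(&obj->refcount);      /* reference count starts at 1 */
        return obj;
}

static inline void my_object_get(struct my_object *obj)
{
        if (obj)
                kref_get(&obj->refcount);
}

static inline void my_object_put(struct my_object *obj)
{
        if (obj)
                kref_put(&obj->refcount, my_object_release);
}

In the scheduler the release callback is carried by the job itself (job->free_callback), which is why amd_sched_job_put() passes it to kref_put().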
@@ -27,7 +27,8 @@
 #include <drm/drmP.h>
 #include "gpu_scheduler.h"
 
-struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner)
+struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity,
+                                               void *owner)
 {
         struct amd_sched_fence *fence = NULL;
         unsigned seq;
@@ -38,12 +39,12 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity
 
         INIT_LIST_HEAD(&fence->scheduled_cb);
         fence->owner = owner;
-        fence->sched = s_entity->sched;
+        fence->sched = entity->sched;
         spin_lock_init(&fence->lock);
 
-        seq = atomic_inc_return(&s_entity->fence_seq);
+        seq = atomic_inc_return(&entity->fence_seq);
         fence_init(&fence->base, &amd_sched_fence_ops, &fence->lock,
-                   s_entity->fence_context, seq);
+                   entity->fence_context, seq);
 
         return fence;
 }
...
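As a closing note, the to_amd_sched_fence() helper reformatted in gpu_scheduler.h above uses the common container_of() downcast guarded by an ops-table check: the embedded base fence's ops pointer identifies the wrapping type before the cast is trusted. A minimal sketch of the idiom with hypothetical base/wrapper types (only container_of() is the real kernel macro):

#include <linux/kernel.h>

struct base_ops;

struct base_obj {
        const struct base_ops *ops;
};

struct wrapper_obj {
        struct base_obj base;   /* embedded base, like amd_sched_fence::base */
        int payload;
};

extern const struct base_ops wrapper_ops;

static inline struct wrapper_obj *to_wrapper_obj(struct base_obj *b)
{
        struct wrapper_obj *w = container_of(b, struct wrapper_obj, base);

        /* Only trust the cast if the base object really uses our ops table. */
        if (w->base.ops == &wrapper_ops)
                return w;

        return NULL;
}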