Commit 4c7eb91c authored by Junwei Zhang, committed by Alex Deucher

drm/amdgpu: refine the job naming for amdgpu_job and amdgpu_sched_job

Use consistent naming across functions: a struct amd_sched_job pointer is named sched_job, while a struct amdgpu_job pointer is named job.
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: David Zhou <david1.zhou@amd.com>
Signed-off-by: Junwei Zhang <Jerry.Zhang@amd.com>
parent bf60efd3
@@ -1275,7 +1275,7 @@ struct amdgpu_job {
 	uint32_t num_ibs;
 	struct mutex job_lock;
 	struct amdgpu_user_fence uf;
-	int (*free_job)(struct amdgpu_job *sched_job);
+	int (*free_job)(struct amdgpu_job *job);
 };
 
 static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, uint32_t ib_idx, int idx)
@@ -778,15 +778,15 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_cs_free_job(struct amdgpu_job *sched_job)
+static int amdgpu_cs_free_job(struct amdgpu_job *job)
 {
 	int i;
-	if (sched_job->ibs)
-		for (i = 0; i < sched_job->num_ibs; i++)
-			amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
-	if (sched_job->uf.bo)
-		drm_gem_object_unreference_unlocked(&sched_job->uf.bo->gem_base);
+	if (job->ibs)
+		for (i = 0; i < job->num_ibs; i++)
+			amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
+	if (job->uf.bo)
+		drm_gem_object_unreference_unlocked(&job->uf.bo->gem_base);
 	return 0;
 }
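A note on the free_job hook whose parameter is renamed here: it is the per-job cleanup callback that amdgpu_sched_run_job() invokes once the IBs have been scheduled (see the scheduler hunk below). As a rough sketch of how a submit path is assumed to install it (illustrative only; amdgpu_cs_submit_sketch is a hypothetical name, not a function in this patch):

	/* Illustrative sketch: installing the cleanup callback at submit
	 * time. The fields follow the struct shown in the first hunk.
	 */
	static void amdgpu_cs_submit_sketch(struct amdgpu_device *adev,
					    struct amdgpu_job *job)
	{
		job->adev = adev;
		mutex_init(&job->job_lock);
		/* invoked from amdgpu_sched_run_job() after scheduling */
		job->free_job = amdgpu_cs_free_job;
	}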
@@ -27,42 +27,42 @@
 #include <drm/drmP.h>
 #include "amdgpu.h"
 
-static struct fence *amdgpu_sched_dependency(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
-	struct amdgpu_job *sched_job = (struct amdgpu_job *)job;
-	return amdgpu_sync_get_fence(&sched_job->ibs->sync);
+	struct amdgpu_job *job = (struct amdgpu_job *)sched_job;
+	return amdgpu_sync_get_fence(&job->ibs->sync);
 }
 
-static struct fence *amdgpu_sched_run_job(struct amd_sched_job *job)
+static struct fence *amdgpu_sched_run_job(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_fence *fence = NULL;
-	struct amdgpu_job *sched_job;
+	struct amdgpu_job *job;
 	int r;
 
-	if (!job) {
+	if (!sched_job) {
 		DRM_ERROR("job is null\n");
 		return NULL;
 	}
-	sched_job = (struct amdgpu_job *)job;
-	mutex_lock(&sched_job->job_lock);
-	r = amdgpu_ib_schedule(sched_job->adev,
-			       sched_job->num_ibs,
-			       sched_job->ibs,
-			       sched_job->base.owner);
+	job = (struct amdgpu_job *)sched_job;
+	mutex_lock(&job->job_lock);
+	r = amdgpu_ib_schedule(job->adev,
+			       job->num_ibs,
+			       job->ibs,
+			       job->base.owner);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
 	}
 
-	fence = amdgpu_fence_ref(sched_job->ibs[sched_job->num_ibs - 1].fence);
+	fence = amdgpu_fence_ref(job->ibs[job->num_ibs - 1].fence);
 
 err:
-	if (sched_job->free_job)
-		sched_job->free_job(sched_job);
+	if (job->free_job)
+		job->free_job(job);
 
-	mutex_unlock(&sched_job->job_lock);
-	fence_put(&sched_job->base.s_fence->base);
-	kfree(sched_job);
+	mutex_unlock(&job->job_lock);
+	fence_put(&job->base.s_fence->base);
+	kfree(job);
 	return fence ? &fence->base : NULL;
 }
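The raw casts between struct amd_sched_job * and struct amdgpu_job * above only work if the scheduler job is the first member of the driver job; the references to job->base.owner and job->base.s_fence indicate an embedded member named base. A minimal sketch of the assumed layout (the fields before num_ibs are inferred, not shown in this patch):

	/* Assumed layout (sketch): with base first, a struct amd_sched_job *
	 * handed out by the scheduler and the containing struct amdgpu_job *
	 * share the same address, which is what makes the casts valid.
	 */
	struct amdgpu_job {
		struct amd_sched_job	base;	/* must stay first for the casts */
		struct amdgpu_device	*adev;
		struct amdgpu_ib	*ibs;
		uint32_t		num_ibs;
		struct mutex		job_lock;
		struct amdgpu_user_fence uf;
		int (*free_job)(struct amdgpu_job *job);
	};

container_of(sched_job, struct amdgpu_job, base) would express the same relationship without depending on member order.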
@@ -805,10 +805,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
 }
 
 static int amdgpu_uvd_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
@@ -342,10 +342,10 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp)
 }
 
 static int amdgpu_vce_free_job(
-	struct amdgpu_job *sched_job)
+	struct amdgpu_job *job)
 {
-	amdgpu_ib_free(sched_job->adev, sched_job->ibs);
-	kfree(sched_job->ibs);
+	amdgpu_ib_free(job->adev, job->ibs);
+	kfree(job->ibs);
 	return 0;
 }
@@ -316,12 +316,12 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
 	}
 }
 
-int amdgpu_vm_free_job(struct amdgpu_job *sched_job)
+int amdgpu_vm_free_job(struct amdgpu_job *job)
 {
 	int i;
-	for (i = 0; i < sched_job->num_ibs; i++)
-		amdgpu_ib_free(sched_job->adev, &sched_job->ibs[i]);
-	kfree(sched_job->ibs);
+	for (i = 0; i < job->num_ibs; i++)
+		amdgpu_ib_free(job->adev, &job->ibs[i]);
+	kfree(job->ibs);
 	return 0;
 }
@@ -68,29 +68,29 @@ static struct amd_sched_job *
 amd_sched_rq_select_job(struct amd_sched_rq *rq)
 {
 	struct amd_sched_entity *entity;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	spin_lock(&rq->lock);
 
 	entity = rq->current_entity;
 	if (entity) {
 		list_for_each_entry_continue(entity, &rq->entities, list) {
-			job = amd_sched_entity_pop_job(entity);
-			if (job) {
+			sched_job = amd_sched_entity_pop_job(entity);
+			if (sched_job) {
 				rq->current_entity = entity;
 				spin_unlock(&rq->lock);
-				return job;
+				return sched_job;
 			}
 		}
 	}
 
 	list_for_each_entry(entity, &rq->entities, list) {
-		job = amd_sched_entity_pop_job(entity);
-		if (job) {
+		sched_job = amd_sched_entity_pop_job(entity);
+		if (sched_job) {
 			rq->current_entity = entity;
 			spin_unlock(&rq->lock);
-			return job;
+			return sched_job;
 		}
 
 		if (entity == rq->current_entity)
@@ -208,15 +208,15 @@ static struct amd_sched_job *
 amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 {
 	struct amd_gpu_scheduler *sched = entity->scheduler;
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	if (ACCESS_ONCE(entity->dependency))
 		return NULL;
 
-	if (!kfifo_out_peek(&entity->job_queue, &job, sizeof(job)))
+	if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
 		return NULL;
 
-	while ((entity->dependency = sched->ops->dependency(job))) {
+	while ((entity->dependency = sched->ops->dependency(sched_job))) {
 
 		if (fence_add_callback(entity->dependency, &entity->cb,
 				       amd_sched_entity_wakeup))
@@ -225,32 +225,33 @@ amd_sched_entity_pop_job(struct amd_sched_entity *entity)
 			return NULL;
 	}
 
-	return job;
+	return sched_job;
 }
 
 /**
  * Helper to submit a job to the job queue
  *
- * @job		The pointer to job required to submit
+ * @sched_job		The pointer to job required to submit
  *
  * Returns true if we could submit the job.
  */
-static bool amd_sched_entity_in(struct amd_sched_job *job)
+static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
 {
-	struct amd_sched_entity *entity = job->s_entity;
+	struct amd_sched_entity *entity = sched_job->s_entity;
 	bool added, first = false;
 
 	spin_lock(&entity->queue_lock);
-	added = kfifo_in(&entity->job_queue, &job, sizeof(job)) == sizeof(job);
+	added = kfifo_in(&entity->job_queue, &sched_job,
+			 sizeof(sched_job)) == sizeof(sched_job);
 
-	if (added && kfifo_len(&entity->job_queue) == sizeof(job))
+	if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
 		first = true;
 
 	spin_unlock(&entity->queue_lock);
 
 	/* first job wakes up scheduler */
 	if (first)
-		amd_sched_wakeup(job->sched);
+		amd_sched_wakeup(sched_job->sched);
 
 	return added;
 }
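The sizeof(sched_job) arguments in the kfifo calls above are deliberate: entity->job_queue stores job pointers, not job structs, so every transfer moves exactly one pointer-sized element. A standalone sketch of the same pattern, with illustrative names (ptr_fifo, ptr_fifo_demo are not part of this patch):

	#include <linux/kernel.h>
	#include <linux/kfifo.h>
	#include <linux/slab.h>

	/* Sketch: a byte-kfifo holding pointer values, mirroring job_queue. */
	static struct kfifo ptr_fifo;

	static int ptr_fifo_demo(struct amd_sched_job *sched_job)
	{
		struct amd_sched_job *peeked;
		int r;

		r = kfifo_alloc(&ptr_fifo, 16 * sizeof(void *), GFP_KERNEL);
		if (r)
			return r;

		/* copies sizeof(sched_job) bytes, i.e. the pointer itself */
		kfifo_in(&ptr_fifo, &sched_job, sizeof(sched_job));

		/* reads the pointer back without consuming the element */
		if (kfifo_out_peek(&ptr_fifo, &peeked, sizeof(peeked)))
			WARN_ON(peeked != sched_job);

		kfifo_free(&ptr_fifo);
		return 0;
	}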
@@ -258,7 +259,7 @@ static bool amd_sched_entity_in(struct amd_sched_job *job)
 /**
  * Submit a job to the job queue
  *
- * @job		The pointer to job required to submit
+ * @sched_job		The pointer to job required to submit
  *
  * Returns 0 for success, negative error code otherwise.
  */
@@ -304,17 +305,17 @@ static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
 static struct amd_sched_job *
 amd_sched_select_job(struct amd_gpu_scheduler *sched)
 {
-	struct amd_sched_job *job;
+	struct amd_sched_job *sched_job;
 
 	if (!amd_sched_ready(sched))
 		return NULL;
 
 	/* Kernel run queue has higher priority than normal run queue*/
-	job = amd_sched_rq_select_job(&sched->kernel_rq);
-	if (job == NULL)
-		job = amd_sched_rq_select_job(&sched->sched_rq);
+	sched_job = amd_sched_rq_select_job(&sched->kernel_rq);
+	if (sched_job == NULL)
+		sched_job = amd_sched_rq_select_job(&sched->sched_rq);
 
-	return job;
+	return sched_job;
 }
 
 static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
@@ -340,20 +341,20 @@ static int amd_sched_main(void *param)
 	while (!kthread_should_stop()) {
 		struct amd_sched_entity *entity;
 		struct amd_sched_fence *s_fence;
-		struct amd_sched_job *job;
+		struct amd_sched_job *sched_job;
 		struct fence *fence;
 
 		wait_event_interruptible(sched->wake_up_worker,
 			kthread_should_stop() ||
-			(job = amd_sched_select_job(sched)));
+			(sched_job = amd_sched_select_job(sched)));
 
-		if (!job)
+		if (!sched_job)
 			continue;
 
-		entity = job->s_entity;
-		s_fence = job->s_fence;
+		entity = sched_job->s_entity;
+		s_fence = sched_job->s_fence;
 		atomic_inc(&sched->hw_rq_count);
-		fence = sched->ops->run_job(job);
+		fence = sched->ops->run_job(sched_job);
 		if (fence) {
 			r = fence_add_callback(fence, &s_fence->cb,
 					       amd_sched_process_job);
@@ -367,8 +368,9 @@ static int amd_sched_main(void *param)
 			amd_sched_process_job(NULL, &s_fence->cb);
 		}
 
-		count = kfifo_out(&entity->job_queue, &job, sizeof(job));
-		WARN_ON(count != sizeof(job));
+		count = kfifo_out(&entity->job_queue, &sched_job,
+				  sizeof(sched_job));
+		WARN_ON(count != sizeof(sched_job));
 		wake_up(&sched->job_scheduled);
 	}
 	return 0;
@@ -91,8 +91,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  * these functions should be implemented in driver side
  */
 struct amd_sched_backend_ops {
-	struct fence *(*dependency)(struct amd_sched_job *job);
-	struct fence *(*run_job)(struct amd_sched_job *job);
+	struct fence *(*dependency)(struct amd_sched_job *sched_job);
+	struct fence *(*run_job)(struct amd_sched_job *sched_job);
 };
 
 /**
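For completeness, the two callbacks renamed in this ops table are implemented on the driver side by amdgpu_sched_dependency() and amdgpu_sched_run_job() from the hunk further up. The ops definition itself sits outside the hunks shown here, so treat this as a sketch of the presumed hookup:

	/* Sketch of the driver-side registration; the callback
	 * implementations are the amdgpu functions renamed in this patch.
	 */
	struct amd_sched_backend_ops amdgpu_sched_ops = {
		.dependency = amdgpu_sched_dependency,
		.run_job = amdgpu_sched_run_job,
	};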