Commit 180fc134 authored by Andrey Grodzovsky, committed by Alex Deucher

drm/scheduler: Rename cleanup functions v2.

Everything in the flush code path (i.e. waiting for the SW queue
to become empty) is named *_flush(),
and everything in the release code path is named *_fini().

This patch also affects the amdgpu and etnaviv drivers, which
use those functions.

v2:
Also apply the change to v3d.
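
As an illustration of the resulting call pattern (not part of the patch), a
minimal sketch of how a driver would use the renamed helpers; the foo_* names
are hypothetical:

	#include <drm/gpu_scheduler.h>

	/* Hypothetical driver-side state, for illustration only. */
	struct foo_context {
		struct drm_gpu_scheduler *sched;
		struct drm_sched_entity entity;
	};

	static void foo_context_destroy(struct foo_context *ctx)
	{
		/* One-shot teardown: flushes the entity's SW queue and then
		 * releases the entity; this combined helper was previously
		 * called drm_sched_entity_fini(). */
		drm_sched_entity_destroy(ctx->sched, &ctx->entity);
	}

	static long foo_context_flush(struct foo_context *ctx, long timeout)
	{
		/* Flush-only path (e.g. on file close): waits for the SW
		 * queue to drain and returns the remaining jiffies of the
		 * timeout; previously drm_sched_entity_do_release(). The
		 * entity is released later with drm_sched_entity_fini()
		 * (previously drm_sched_entity_cleanup()). */
		return drm_sched_entity_flush(ctx->sched, &ctx->entity, timeout);
	}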
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Suggested-by: Christian König <christian.koenig@amd.com>
Acked-by: Lucas Stach <l.stach@pengutronix.de>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent f3efec54
@@ -104,7 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
 failed:
 	for (j = 0; j < i; j++)
-		drm_sched_entity_fini(&adev->rings[j]->sched,
+		drm_sched_entity_destroy(&adev->rings[j]->sched,
 			  &ctx->rings[j].entity);
 	kfree(ctx->fences);
 	ctx->fences = NULL;
@@ -178,7 +178,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
-		drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
+		drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
 			&ctx->rings[i].entity);
 	}
@@ -466,7 +466,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
 		if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
 			continue;
-		max_wait = drm_sched_entity_do_release(&ctx->adev->rings[i]->sched,
+		max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
 			  &ctx->rings[i].entity, max_wait);
 	}
 }
@@ -492,7 +492,7 @@ void amdgpu_ctx_mgr_entity_cleanup(struct amdgpu_ctx_mgr *mgr)
 			continue;
 		if (kref_read(&ctx->refcount) == 1)
-			drm_sched_entity_cleanup(&ctx->adev->rings[i]->sched,
+			drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
 				&ctx->rings[i].entity);
 		else
 			DRM_ERROR("ctx %p is still alive\n", ctx);
...
@@ -162,7 +162,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
 static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
 {
 	if (adev->mman.mem_global_referenced) {
-		drm_sched_entity_fini(adev->mman.entity.sched,
+		drm_sched_entity_destroy(adev->mman.entity.sched,
 				      &adev->mman.entity);
 		mutex_destroy(&adev->mman.gtt_window_lock);
 		drm_global_item_unref(&adev->mman.bo_global_ref.ref);
...
@@ -309,7 +309,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
 		kfree(adev->uvd.inst[j].saved_bo);
-		drm_sched_entity_fini(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
+		drm_sched_entity_destroy(&adev->uvd.inst[j].ring.sched, &adev->uvd.inst[j].entity);
 		amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
 				      &adev->uvd.inst[j].gpu_addr,
...
@@ -222,7 +222,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
 	if (adev->vce.vcpu_bo == NULL)
 		return 0;
-	drm_sched_entity_fini(&adev->vce.ring[0].sched, &adev->vce.entity);
+	drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
 	amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
 			      (void **)&adev->vce.cpu_addr);
...
@@ -2643,7 +2643,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 	vm->root.base.bo = NULL;
 error_free_sched_entity:
-	drm_sched_entity_fini(&ring->sched, &vm->entity);
+	drm_sched_entity_destroy(&ring->sched, &vm->entity);
 	return r;
 }
@@ -2780,7 +2780,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
 	}
-	drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+	drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
 	if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
...
@@ -470,7 +470,7 @@ static int uvd_v6_0_sw_fini(void *handle)
 		return r;
 	if (uvd_v6_0_enc_support(adev)) {
-		drm_sched_entity_fini(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
+		drm_sched_entity_destroy(&adev->uvd.inst->ring_enc[0].sched, &adev->uvd.inst->entity_enc);
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst->ring_enc[i]);
...
@@ -491,7 +491,7 @@ static int uvd_v7_0_sw_fini(void *handle)
 		return r;
 	for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
-		drm_sched_entity_fini(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
+		drm_sched_entity_destroy(&adev->uvd.inst[j].ring_enc[0].sched, &adev->uvd.inst[j].entity_enc);
 		for (i = 0; i < adev->uvd.num_enc_rings; ++i)
 			amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
...
@@ -78,8 +78,8 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 				gpu->lastctx = NULL;
 			mutex_unlock(&gpu->lock);
-			drm_sched_entity_fini(&gpu->sched,
+			drm_sched_entity_destroy(&gpu->sched,
 					      &ctx->sched_entity[i]);
 		}
 	}
...
@@ -256,7 +256,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
 /**
- * drm_sched_entity_do_release - Destroy a context entity
+ * drm_sched_entity_flush - Flush a context entity
  *
  * @sched: scheduler instance
  * @entity: scheduler entity
@@ -267,7 +267,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
  *
  * Returns the remaining time in jiffies left from the input timeout
  */
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity, long timeout)
 {
 	long ret = timeout;
@@ -294,7 +294,7 @@ long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
 	return ret;
 }
-EXPORT_SYMBOL(drm_sched_entity_do_release);
+EXPORT_SYMBOL(drm_sched_entity_flush);
 /**
- * drm_sched_entity_cleanup - Destroy a context entity
+ * drm_sched_entity_fini - Destroy a context entity
@@ -306,7 +306,7 @@ EXPORT_SYMBOL(drm_sched_entity_do_release);
  * entity and signals all jobs with an error code if the process was killed.
  *
  */
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity)
 {
@@ -357,7 +357,7 @@ void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
 	dma_fence_put(entity->last_scheduled);
 	entity->last_scheduled = NULL;
 }
-EXPORT_SYMBOL(drm_sched_entity_cleanup);
+EXPORT_SYMBOL(drm_sched_entity_fini);
 /**
  * drm_sched_entity_fini - Destroy a context entity
@@ -367,13 +367,13 @@ EXPORT_SYMBOL(drm_sched_entity_cleanup);
  *
  * Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
  */
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
 			struct drm_sched_entity *entity)
 {
-	drm_sched_entity_do_release(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
-	drm_sched_entity_cleanup(sched, entity);
+	drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+	drm_sched_entity_fini(sched, entity);
 }
-EXPORT_SYMBOL(drm_sched_entity_fini);
+EXPORT_SYMBOL(drm_sched_entity_destroy);
 static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
 {
...
@@ -151,7 +151,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file)
 	enum v3d_queue q;
 	for (q = 0; q < V3D_MAX_QUEUES; q++) {
-		drm_sched_entity_fini(&v3d->queue[q].sched,
+		drm_sched_entity_destroy(&v3d->queue[q].sched,
 				      &v3d_priv->sched_entity[q]);
 	}
...
@@ -284,12 +284,12 @@ int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
 			  struct drm_sched_entity *entity,
 			  struct drm_sched_rq *rq,
 			  atomic_t *guilty);
-long drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
+long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity, long timeout);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
-			   struct drm_sched_entity *entity);
 void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
 			   struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
+			      struct drm_sched_entity *entity);
 void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
 			       struct drm_sched_entity *entity);
 void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
...