Commit c5637837 authored by Monk Liu, committed by Alex Deucher

drm/amdgpu: keep vm in job instead of ib (v2)

ib.vm is a legacy way to get the vm; since the scheduler was
implemented, the vm should be taken from the job, and all IBs
of one job share the same vm, so there is no need to keep ib.vm.
Just move the vm field to the job.

This patch also adds job as a parameter to ib_schedule
so it can get the vm from job->vm.

v2: agd: squash in:
drm/amdgpu: check if ring emit_vm_flush exists in vm flush

No vm flush on engines that don't support VM.

bug: https://bugs.freedesktop.org/show_bug.cgi?id=95195

Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 1f207f81
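
At a glance, the new calling convention (a summary assembled from the hunks below, not itself part of the patch):

	/* scheduler path: the vm now comes from the job */
	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
			       job->sync.last_vm_update, job, &fence);

	/* direct submission (ring tests, UVD/VCE direct): no job, hence no vm */
	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);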
drivers/gpu/drm/amd/amdgpu/amdgpu.h

@@ -740,7 +740,6 @@ struct amdgpu_ib {
 	uint64_t			gpu_addr;
 	uint32_t			*ptr;
 	struct amdgpu_user_fence	*user;
-	struct amdgpu_vm		*vm;
 	unsigned			vm_id;
 	uint64_t			vm_pd_addr;
 	struct amdgpu_ctx		*ctx;
@@ -763,7 +762,7 @@ enum amdgpu_ring_type {
 extern const struct amd_sched_backend_ops amdgpu_sched_ops;
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job);
+		     struct amdgpu_job **job, struct amdgpu_vm *vm);
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 			     struct amdgpu_job **job);
@@ -1191,7 +1190,7 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f);
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ib, struct fence *last_vm_update,
-		       struct fence **f);
+		       struct amdgpu_job *job, struct fence **f);
 int amdgpu_ib_pool_init(struct amdgpu_device *adev);
 void amdgpu_ib_pool_fini(struct amdgpu_device *adev);
 int amdgpu_ib_ring_tests(struct amdgpu_device *adev);
@@ -1247,6 +1246,7 @@ struct amdgpu_cs_parser {
 struct amdgpu_job {
 	struct amd_sched_job	base;
 	struct amdgpu_device	*adev;
+	struct amdgpu_vm	*vm;
 	struct amdgpu_ring	*ring;
 	struct amdgpu_sync	sync;
 	struct amdgpu_ib	*ibs;
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

@@ -120,6 +120,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_cs *cs = data;
 	uint64_t *chunk_array_user;
 	uint64_t *chunk_array;
@@ -214,7 +215,7 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 		}
 	}
 
-	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job);
+	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);
 	if (ret)
 		goto free_all_kdata;
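
Taken together, the two amdgpu_cs.c hunks mean the CS ioctl path resolves the vm once from the file private and binds it to the job at allocation time, roughly:

	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	/* ... */
	ret = amdgpu_job_alloc(p->adev, num_ibs, &p->job, vm);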
drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c

@@ -74,7 +74,6 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
 		ib->gpu_addr = amdgpu_sa_bo_gpu_addr(ib->sa_bo);
 	}
 
-	ib->vm = vm;
 	ib->vm_id = 0;
 
 	return 0;
@@ -117,13 +116,13 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct fence *f)
  */
 int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		       struct amdgpu_ib *ibs, struct fence *last_vm_update,
-		       struct fence **f)
+		       struct amdgpu_job *job, struct fence **f)
 {
 	struct amdgpu_device *adev = ring->adev;
 	struct amdgpu_ib *ib = &ibs[0];
 	struct amdgpu_ctx *ctx, *old_ctx;
-	struct amdgpu_vm *vm;
 	struct fence *hwf;
+	struct amdgpu_vm *vm = NULL;
 	unsigned i, patch_offset = ~0;
 	int r = 0;
@@ -132,7 +131,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		return -EINVAL;
 
 	ctx = ibs->ctx;
-	vm = ibs->vm;
+	if (job) /* for domain0 job like ring test, ibs->job is not assigned */
+		vm = job->vm;
 
 	if (!ring->ready) {
 		dev_err(adev->dev, "couldn't schedule ib\n");
@@ -174,14 +174,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	old_ctx = ring->current_ctx;
 	for (i = 0; i < num_ibs; ++i) {
 		ib = &ibs[i];
-
-		if (ib->ctx != ctx || ib->vm != vm) {
-			ring->current_ctx = old_ctx;
-			if (ib->vm_id)
-				amdgpu_vm_reset_id(adev, ib->vm_id);
-			amdgpu_ring_undo(ring);
-			return -EINVAL;
-		}
 		amdgpu_ring_emit_ib(ring, ib);
 		ring->current_ctx = ctx;
 	}
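
Abridged, the vm lookup in amdgpu_ib_schedule() now reduces to the snippet below; dropping the per-IB ctx/vm consistency check is safe because all IBs of a job share the job's ctx and vm by construction:

	struct amdgpu_vm *vm = NULL;

	if (job)	/* ring tests and other direct submissions carry no job */
		vm = job->vm;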
drivers/gpu/drm/amd/amdgpu/amdgpu_job.c

@@ -46,7 +46,7 @@ void amdgpu_job_timeout_func(struct work_struct *work)
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
-		     struct amdgpu_job **job)
+		     struct amdgpu_job **job, struct amdgpu_vm *vm)
 {
 	size_t size = sizeof(struct amdgpu_job);
@@ -60,6 +60,7 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 		return -ENOMEM;
 
 	(*job)->adev = adev;
+	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
 	INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
@@ -74,7 +75,7 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 {
 	int r;
 
-	r = amdgpu_job_alloc(adev, 1, job);
+	r = amdgpu_job_alloc(adev, 1, job, NULL);
 	if (r)
 		return r;
@@ -138,7 +139,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 {
 	struct amdgpu_job *job = to_amdgpu_job(sched_job);
-	struct amdgpu_vm *vm = job->ibs->vm;
+	struct amdgpu_vm *vm = job->vm;
 
 	struct fence *fence = amdgpu_sync_get_fence(&job->sync);
@@ -186,7 +187,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
-			       job->sync.last_vm_update, &fence);
+			       job->sync.last_vm_update, job, &fence);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
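
Note the two allocation flavors after this patch: user CS jobs get the vm passed in (see amdgpu_cs.c above), while kernel-internal jobs created through amdgpu_job_alloc_with_ib() run with vm == NULL and therefore skip VM handling:

	r = amdgpu_job_alloc(adev, 1, job, NULL);	/* kernel job: no vm */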
drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c

@@ -910,7 +910,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 		job->fence = f;
 		if (r)
 			goto err_free;
drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c

@@ -436,7 +436,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
 	for (i = ib->length_dw; i < ib_size_dw; ++i)
 		ib->ptr[i] = 0x0;
 
-	r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 	job->fence = f;
 	if (r)
 		goto err;
@@ -498,7 +498,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 		ib->ptr[i] = 0x0;
 
 	if (direct) {
-		r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f);
+		r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f);
 		job->fence = f;
 		if (r)
 			goto err;
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c

@@ -303,7 +303,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring,
 	    pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed))
 		amdgpu_ring_emit_pipeline_sync(ring);
 
-	if (pd_addr != AMDGPU_VM_NO_FLUSH) {
+	if (ring->funcs->emit_vm_flush &&
+	    pd_addr != AMDGPU_VM_NO_FLUSH) {
 		struct fence *fence;
 
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id);
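
This is the squashed v2 fix: engines without VM support leave ring->funcs->emit_vm_flush NULL, so the guard skips the flush rather than calling through a NULL pointer. A sketch of the dispatch it protects (assuming the amdgpu_ring_emit_vm_flush() wrapper of this era, which expands to ring->funcs->emit_vm_flush(...)):

	if (ring->funcs->emit_vm_flush &&	/* NULL on non-VM engines */
	    pd_addr != AMDGPU_VM_NO_FLUSH)
		amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr);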
drivers/gpu/drm/amd/amdgpu/cik_sdma.c

@@ -643,7 +643,7 @@ static int cik_sdma_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[3] = 1;
 	ib.ptr[4] = 0xDEADBEEF;
 	ib.length_dw = 5;
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;
drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c

@@ -2136,7 +2136,7 @@ static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err2;
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c

@@ -800,7 +800,7 @@ static int gfx_v8_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[2] = 0xDEADBEEF;
 	ib.length_dw = 3;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err2;
@@ -1551,7 +1551,7 @@ static int gfx_v8_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
 	ib.ptr[ib.length_dw++] = EVENT_TYPE(7) | EVENT_INDEX(4);
 
 	/* shedule the ib on the ring */
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r) {
 		DRM_ERROR("amdgpu: ib submit failed (%d).\n", r);
 		goto fail;
drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c

@@ -701,7 +701,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;
drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c

@@ -925,7 +925,7 @@ static int sdma_v3_0_ring_test_ib(struct amdgpu_ring *ring)
 	ib.ptr[7] = SDMA_PKT_NOP_HEADER_OP(SDMA_OP_NOP);
 	ib.length_dw = 8;
 
-	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
+	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, NULL, &f);
 	if (r)
 		goto err1;