Commit 47ca7efa authored by Christian König and committed by Alex Deucher

drm/amdgpu: allow direct submission in the VM backends v2

This allows us to update page tables directly while in a page fault.

v2: use direct/delayed entities and still wait for moves
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent a2cf3247
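
The policy both prepare() backends converge on after this patch can be summed up outside the kernel. Below is a minimal, self-contained C sketch of that policy rather than driver code: vm_update_params, wait_fence() and prepare_update() are illustrative stand-ins for the amdgpu structures and sync calls the patch touches.

/* Sketch of the prepare step: a "direct" update (the page fault path)
 * still waits for the exclusive BO move fence, but skips waiting on
 * previously scheduled submissions against the page table reservation.
 * All names here are illustrative, not the amdgpu API.
 */
#include <stdbool.h>
#include <stdio.h>

struct vm_update_params {
        bool direct;            /* set when updating PTEs from a page fault */
};

static int wait_fence(const char *what)
{
        printf("waiting for %s\n", what);
        return 0;
}

static int prepare_update(struct vm_update_params *p, bool have_move_fence)
{
        int r;

        /* Moves must always finish before the PTEs point at the memory. */
        if (have_move_fence) {
                r = wait_fence("exclusive move fence");
                if (r)
                        return r;
        }

        /* In the fault path we must not block on earlier submissions. */
        if (p->direct)
                return 0;

        return wait_fence("page table reservation object");
}

int main(void)
{
        struct vm_update_params fault = { .direct = true };
        struct vm_update_params normal = { .direct = false };

        prepare_update(&fault, true);   /* waits only for the move */
        prepare_update(&normal, true);  /* waits for move and prior jobs */
        return 0;
}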
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -201,6 +201,11 @@ struct amdgpu_vm_update_params {
          */
         struct amdgpu_vm *vm;
 
+        /**
+         * @direct: if changes should be made directly
+         */
+        bool direct;
+
         /**
          * @pages_addr:
          *
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
 {
        int r;
 
-       /* Wait for PT BOs to be idle. PTs share the same resv. object
-        * as the root PD BO
-        */
-       r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
-       if (unlikely(r))
-               return r;
-
        /* Wait for any BO move to be completed */
        if (exclusive) {
                r = dma_fence_wait(exclusive, true);
@@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
                        return r;
        }
 
-       return 0;
+       /* Don't wait for submissions during page fault */
+       if (p->direct)
+               return 0;
+
+       /* Wait for PT BOs to be idle. PTs share the same resv. object
+        * as the root PD BO
+        */
+       return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
 }
 
 /**
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
        if (r)
                return r;
 
-       r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
-       if (r)
-               return r;
-
-       p->num_dw_left = ndw;
-
-       r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
-                            owner, false);
-       if (r)
-               return r;
-
-       return 0;
+       /* Wait for moves to be completed */
+       r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false);
+       if (r)
+               return r;
+
+       p->num_dw_left = ndw;
+
+       /* Don't wait for any submissions during page fault handling */
+       if (p->direct)
+               return 0;
+
+       return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
+                               owner, false);
 }
 
 /**
@@ -95,23 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
 {
        struct amdgpu_bo *root = p->vm->root.base.bo;
        struct amdgpu_ib *ib = p->job->ibs;
+       struct drm_sched_entity *entity;
        struct amdgpu_ring *ring;
        struct dma_fence *f;
        int r;
 
-       ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
-                           sched);
+       entity = p->direct ? &p->vm->direct : &p->vm->delayed;
+       ring = container_of(entity->rq->sched, struct amdgpu_ring, sched);
 
        WARN_ON(ib->length_dw == 0);
        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > p->num_dw_left);
-       r = amdgpu_job_submit(p->job, &p->vm->delayed,
-                             AMDGPU_FENCE_OWNER_VM, &f);
+       r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error;
 
        amdgpu_bo_fence(root, f, true);
-       if (fence)
+       if (fence && !p->direct)
                swap(*fence, f);
        dma_fence_put(f);
        return 0;

@@ -121,7 +123,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        return r;
 }
 
 /**
  * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping
  *
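
The commit side of the change reduces to an entity choice plus fence handling: direct updates are submitted on a dedicated scheduler entity and their fence is not handed back to the caller for pipelining. The sketch below models only that decision; sched_entity, vm_model, submit_job() and commit_update() are made-up stand-ins, not the amdgpu API.

/* Sketch of the commit step: pick the direct or delayed entity and only
 * hand the fence back for delayed (pipelined) updates.
 */
#include <stdbool.h>
#include <stdio.h>

struct sched_entity {
        const char *name;
};

struct vm_model {
        struct sched_entity direct;     /* used from the page fault handler */
        struct sched_entity delayed;    /* used for normal, pipelined updates */
};

struct vm_update_params {
        struct vm_model *vm;
        bool direct;
};

static int submit_job(struct sched_entity *entity, int *fence_out)
{
        printf("submitting on the %s entity\n", entity->name);
        *fence_out = 42;        /* stand-in for the scheduler fence */
        return 0;
}

static int commit_update(struct vm_update_params *p, int *fence)
{
        struct sched_entity *entity;
        int f;
        int r;

        entity = p->direct ? &p->vm->direct : &p->vm->delayed;

        r = submit_job(entity, &f);
        if (r)
                return r;

        /* Direct updates are not pipelined, so keep the fence to ourselves. */
        if (fence && !p->direct)
                *fence = f;

        return 0;
}

int main(void)
{
        struct vm_model vm = {
                .direct  = { .name = "direct" },
                .delayed = { .name = "delayed" },
        };
        struct vm_update_params p = { .vm = &vm, .direct = true };
        int fence = 0;

        commit_update(&p, &fence);      /* fence stays 0 for a direct update */
        return 0;
}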