Commit 6f0308eb authored by Dave Airlie, committed by Alex Deucher

amdgpu/cs: split out fence dependency checking (v2)

This just splits out the fence dependency checking into its
own function to make it easier to add semaphore dependencies.

v2: rebase onto other changes.

v1-Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 64dab074
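With the per-chunk handling split out, the dispatch loop in amdgpu_cs_dependencies() becomes the only place that has to grow when a new dependency chunk type is introduced, which is the stated goal of making semaphore dependencies easier to add. The sketch below only illustrates that shape; AMDGPU_CHUNK_ID_SEM_WAIT and amdgpu_cs_process_sem_dep() are hypothetical names used for illustration, not part of this patch or of the amdgpu UAPI.

/* Illustrative sketch only -- hypothetical chunk id and helper. */
static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
				  struct amdgpu_cs_parser *p)
{
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk = &p->chunks[i];

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
			/* existing fence dependency handler from this patch */
			r = amdgpu_cs_process_fence_dep(p, chunk);
			if (r)
				return r;
		} else if (chunk->chunk_id == AMDGPU_CHUNK_ID_SEM_WAIT) {
			/* hypothetical semaphore handler, added the same way */
			r = amdgpu_cs_process_sem_dep(p, chunk);
			if (r)
				return r;
		}
	}

	return 0;
}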
@@ -923,53 +923,44 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	return 0;
 }
 
-static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
-				  struct amdgpu_cs_parser *p)
+static int amdgpu_cs_process_fence_dep(struct amdgpu_cs_parser *p,
+				       struct amdgpu_cs_chunk *chunk)
 {
 	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
-	int i, j, r;
-
-	for (i = 0; i < p->nchunks; ++i) {
-		struct drm_amdgpu_cs_chunk_dep *deps;
-		struct amdgpu_cs_chunk *chunk;
-		unsigned num_deps;
-
-		chunk = &p->chunks[i];
-
-		if (chunk->chunk_id != AMDGPU_CHUNK_ID_DEPENDENCIES)
-			continue;
+	unsigned num_deps;
+	int i, r;
+	struct drm_amdgpu_cs_chunk_dep *deps;
 
-		deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
-		num_deps = chunk->length_dw * 4 /
-			sizeof(struct drm_amdgpu_cs_chunk_dep);
+	deps = (struct drm_amdgpu_cs_chunk_dep *)chunk->kdata;
+	num_deps = chunk->length_dw * 4 /
+		sizeof(struct drm_amdgpu_cs_chunk_dep);
 
-		for (j = 0; j < num_deps; ++j) {
-			struct amdgpu_ring *ring;
-			struct amdgpu_ctx *ctx;
-			struct dma_fence *fence;
+	for (i = 0; i < num_deps; ++i) {
+		struct amdgpu_ring *ring;
+		struct amdgpu_ctx *ctx;
+		struct dma_fence *fence;
 
-			ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
-			if (ctx == NULL)
-				return -EINVAL;
+		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
+		if (ctx == NULL)
+			return -EINVAL;
 
-			r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
-						 deps[j].ip_type,
-						 deps[j].ip_instance,
-						 deps[j].ring, &ring);
-			if (r) {
-				amdgpu_ctx_put(ctx);
-				return r;
-			}
+		r = amdgpu_queue_mgr_map(p->adev, &ctx->queue_mgr,
+					 deps[i].ip_type,
+					 deps[i].ip_instance,
+					 deps[i].ring, &ring);
+		if (r) {
+			amdgpu_ctx_put(ctx);
+			return r;
+		}
 
-			fence = amdgpu_ctx_get_fence(ctx, ring,
-						     deps[j].handle);
-			if (IS_ERR(fence)) {
-				r = PTR_ERR(fence);
-				amdgpu_ctx_put(ctx);
-				return r;
-			} else if (fence) {
-				r = amdgpu_sync_fence(adev, &p->job->sync,
-						      fence);
-				dma_fence_put(fence);
-				amdgpu_ctx_put(ctx);
+		fence = amdgpu_ctx_get_fence(ctx, ring,
+					     deps[i].handle);
+		if (IS_ERR(fence)) {
+			r = PTR_ERR(fence);
+			amdgpu_ctx_put(ctx);
+			return r;
+		} else if (fence) {
+			r = amdgpu_sync_fence(p->adev, &p->job->sync,
+					      fence);
+			dma_fence_put(fence);
+			amdgpu_ctx_put(ctx);
@@ -977,6 +968,24 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
-					return r;
-			}
-		}
+				return r;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
+				  struct amdgpu_cs_parser *p)
+{
+	int i, r;
+
+	for (i = 0; i < p->nchunks; ++i) {
+		struct amdgpu_cs_chunk *chunk;
+
+		chunk = &p->chunks[i];
+
+		if (chunk->chunk_id == AMDGPU_CHUNK_ID_DEPENDENCIES) {
+			r = amdgpu_cs_process_fence_dep(p, chunk);
+			if (r)
+				return r;
+		}
 	}
 
 	return 0;