Commit 91acbeb6 authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix user fence handling

This fixes a random corruption under memory pressure. We need to fence
the BO for the user fence as well, otherwise it might be swapped out
and the GPU could write the fence value to an undesired location.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Cc: stable@vger.kernel.org
parent 4655a12b
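
For context: user space asks for a user fence by attaching an AMDGPU_CHUNK_ID_FENCE chunk to the CS ioctl. The chunk payload names a GEM handle plus a byte offset inside that BO, and the GPU later writes the fence value to exactly that location, which is why the BO must not be swapped out in the meantime. A minimal sketch of the payload the new helper parses (member layout as in include/uapi/drm/amdgpu_drm.h of this era; treat the details as an assumption, not a quote):

    #include <linux/types.h>

    /* payload of an AMDGPU_CHUNK_ID_FENCE chunk (sketch) */
    struct drm_amdgpu_cs_chunk_fence {
    	__u32 handle;	/* GEM handle of the BO that receives the fence value */
    	__u32 offset;	/* byte offset of the fence location inside that BO */
    };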
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1265,6 +1265,7 @@ struct amdgpu_cs_parser {
 
 	/* user fence */
 	struct amdgpu_user_fence	uf;
+	struct amdgpu_bo_list_entry	uf_entry;
 };
 
 struct amdgpu_job {
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,6 +127,37 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
 	return 0;
 }
 
+static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
+				      struct drm_amdgpu_cs_chunk_fence *fence_data)
+{
+	struct drm_gem_object *gobj;
+	uint32_t handle;
+
+	handle = fence_data->handle;
+	gobj = drm_gem_object_lookup(p->adev->ddev, p->filp,
+				     fence_data->handle);
+	if (gobj == NULL)
+		return -EINVAL;
+
+	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
+	p->uf.offset = fence_data->offset;
+
+	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+		drm_gem_object_unreference_unlocked(gobj);
+		return -EINVAL;
+	}
+
+	p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);
+	p->uf_entry.prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
+	p->uf_entry.priority = 0;
+	p->uf_entry.tv.bo = &p->uf_entry.robj->tbo;
+	p->uf_entry.tv.shared = true;
+
+	drm_gem_object_unreference_unlocked(gobj);
+	return 0;
+}
+
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
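
Note that the new helper deliberately takes two references on the same BO: one held by p->uf (used when the fence value is written at the end of submission) and one held by the new validation entry p->uf_entry. Both are dropped independently in amdgpu_cs_parser_fini() below, so each user of the BO owns its own reference. A sketch of the intended reference flow, assuming amdgpu_bo_ref()/amdgpu_bo_unref() form a plain get/put pair:

    /* sketch: lifetime of the user fence BO references (assumed get/put semantics) */
    gobj = drm_gem_object_lookup(dev, filp, handle);	/* +1 lookup reference  */
    p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));	/* +1 held by p->uf     */
    p->uf_entry.robj = amdgpu_bo_ref(p->uf.bo);		/* +1 held by uf_entry  */
    drm_gem_object_unreference_unlocked(gobj);		/* -1 lookup reference  */
    /* ... command submission runs ... */
    amdgpu_bo_unref(&parser->uf.bo);			/* -1 in parser_fini()  */
    amdgpu_bo_unref(&parser->uf_entry.robj);		/* -1 in parser_fini()  */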
@@ -207,28 +238,15 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 
 		case AMDGPU_CHUNK_ID_FENCE:
 			size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-			if (p->chunks[i].length_dw * sizeof(uint32_t) >= size) {
-				uint32_t handle;
-				struct drm_gem_object *gobj;
-				struct drm_amdgpu_cs_chunk_fence *fence_data;
-
-				fence_data = (void *)p->chunks[i].kdata;
-				handle = fence_data->handle;
-				gobj = drm_gem_object_lookup(p->adev->ddev,
-							     p->filp, handle);
-				if (gobj == NULL) {
-					ret = -EINVAL;
-					goto free_partial_kdata;
-				}
-
-				p->uf.bo = gem_to_amdgpu_bo(gobj);
-				amdgpu_bo_ref(p->uf.bo);
-				drm_gem_object_unreference_unlocked(gobj);
-				p->uf.offset = fence_data->offset;
-			} else {
+			if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
 				ret = -EINVAL;
 				goto free_partial_kdata;
 			}
+
+			ret = amdgpu_cs_user_fence_chunk(p, (void *)p->chunks[i].kdata);
+			if (ret)
+				goto free_partial_kdata;
+
 			break;
 
 		case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -391,6 +409,9 @@ static int amdgpu_cs_parser_relocs(struct amdgpu_cs_parser *p)
 	p->vm_bos = amdgpu_vm_get_bos(p->adev, &fpriv->vm,
 				      &p->validated);
 
+	if (p->uf.bo)
+		list_add(&p->uf_entry.tv.head, &p->validated);
+
 	if (need_mmap_lock)
 		down_read(&current->mm->mmap_sem);
 
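
This list_add() is the heart of the fix: everything on p->validated is reserved for the duration of the submission and, afterwards, gets the job's fence attached, so TTM cannot evict the user fence BO until the GPU has finished writing to it. A sketch of the surrounding pattern, assuming the ttm_execbuf_util API of this kernel generation (names and signatures from memory, treat as an assumption):

    /* sketch: how BOs on p->validated are protected across a submission */
    struct ww_acquire_ctx ticket;
    int r;

    /* lock/reserve every BO on the list; reserved BOs cannot be moved */
    r = ttm_eu_reserve_buffers(&ticket, &p->validated, true, NULL);
    if (r)
    	return r;

    /* ... validate placements, build and submit the job ... */

    /* attach the job's fence to every BO and unreserve them; eviction
     * must now wait on that fence, so the user fence BO stays resident
     * until the GPU has written the fence value */
    ttm_eu_fence_buffer_objects(&ticket, &p->validated, fence);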
@@ -488,8 +509,8 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bo
 	for (i = 0; i < parser->num_ibs; i++)
 		amdgpu_ib_free(parser->adev, &parser->ibs[i]);
 	kfree(parser->ibs);
-	if (parser->uf.bo)
-		amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf.bo);
+	amdgpu_bo_unref(&parser->uf_entry.robj);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
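
The old NULL check in front of the unref could be dropped because amdgpu_bo_unref() tolerates a NULL BO pointer, which also makes the unconditional cleanup of uf_entry.robj safe when no fence chunk was supplied. Roughly how the helper behaves (reconstructed from memory of the 4.x driver, treat as an assumption):

    /* sketch: NULL-safe unref, as amdgpu_bo_unref() behaved in this era */
    void amdgpu_bo_unref(struct amdgpu_bo **bo)
    {
    	struct ttm_buffer_object *tbo;

    	if ((*bo) == NULL)	/* nothing to drop, e.g. no fence chunk given */
    		return;

    	tbo = &((*bo)->tbo);
    	ttm_bo_unref(&tbo);	/* drop the underlying TTM reference */
    	if (tbo == NULL)
    		*bo = NULL;
    }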