Commit cc325d19 authored by Christian König, committed by Alex Deucher

drm/amdgpu: check userptrs mm earlier

Check the usermm when we try to use the userptr in the IOCTLs,
instead of when we try to bind it.
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
parent 04db4caf
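
Editorial note: the commit replaces the boolean helper amdgpu_ttm_tt_has_userptr() with amdgpu_ttm_tt_get_usermm(), which returns the owning mm_struct, so each IOCTL path can both detect a userptr BO and verify that it belongs to the calling process before anything gets close to binding. Below is a minimal sketch of that caller-side idiom, assuming kernel context; the helper name amdgpu_check_userptr_owner() is illustrative and is not part of the driver.

	/* Illustrative only: mirrors the check this commit adds to
	 * amdgpu_cs_list_validate() and amdgpu_bo_list_set().
	 */
	static int amdgpu_check_userptr_owner(struct amdgpu_bo *bo)
	{
		struct mm_struct *usermm;

		/* NULL means the BO has no userptr backing at all. */
		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;	/* userptr owned by another process */

		return 0;
	}
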
@@ -2364,7 +2364,7 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *rbo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
 int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
 bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
......
@@ -93,6 +93,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 
 	bool has_userptr = false;
 	unsigned i;
+	int r;
 
 	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
 	if (!array)
@@ -102,17 +103,26 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	for (i = 0; i < num_entries; ++i) {
 		struct amdgpu_bo_list_entry *entry = &array[i];
 		struct drm_gem_object *gobj;
+		struct mm_struct *usermm;
 
 		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
-		if (!gobj)
+		if (!gobj) {
+			r = -ENOENT;
 			goto error_free;
+		}
 
 		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 		drm_gem_object_unreference_unlocked(gobj);
 		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
-		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm))
+		usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
+		if (usermm) {
+			if (usermm != current->mm) {
+				r = -EPERM;
+				goto error_free;
+			}
 			has_userptr = true;
+		}
 		entry->tv.bo = &entry->robj->tbo;
 		entry->tv.shared = true;
@@ -142,7 +152,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 
 error_free:
	drm_free_large(array);
-	return -ENOENT;
+	return r;
 }
 
 struct amdgpu_bo_list *
......
@@ -101,7 +101,7 @@ static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
 	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
 	p->uf.offset = fence_data->offset;
 
-	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+	if (amdgpu_ttm_tt_get_usermm(p->uf.bo->tbo.ttm)) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return -EINVAL;
 	}
@@ -296,8 +296,13 @@ int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
 	list_for_each_entry(lobj, validated, tv.head) {
 		struct amdgpu_bo *bo = lobj->robj;
+		struct mm_struct *usermm;
 		uint32_t domain;
 
+		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+		if (usermm && usermm != current->mm)
+			return -EPERM;
+
 		if (bo->pin_count)
 			continue;
......
@@ -310,7 +310,7 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
 		drm_gem_object_unreference_unlocked(gobj);
 		return -EPERM;
@@ -638,7 +638,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 		break;
 	}
 	case AMDGPU_GEM_OP_SET_PLACEMENT:
-		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
 			r = -EPERM;
 			amdgpu_bo_unreserve(robj);
 			break;
......
@@ -370,7 +370,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	int r, i;
 	unsigned fpfn, lpfn;
 
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return -EPERM;
 
 	if (WARN_ON_ONCE(min_offset > max_offset))
......
@@ -121,7 +121,7 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
 
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
 		return ERR_PTR(-EPERM);
 
 	return drm_gem_prime_export(dev, gobj, flags);
......
@@ -499,9 +499,6 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
 	enum dma_data_direction direction = write ?
 		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
 
-	if (current->mm != gtt->usermm)
-		return -EPERM;
-
 	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
 		/* check that we only pin down anonymous memory
 		   to prevent problems with writeback */
@@ -773,14 +770,14 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
 	return 0;
 }
 
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 
 	if (gtt == NULL)
-		return false;
+		return NULL;
 
-	return !!gtt->userptr;
+	return gtt->usermm;
 }
 
 bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
......
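
Editorial note: the new getter keeps the old boolean use cases trivially available, as the converted call sites above show. A NULL return means the TTM has no userptr, while a non-NULL return is the mm_struct recorded when the userptr was set. Below is a hedged sketch of a wrapper reproducing the old has_userptr() behaviour on top of the new API; the wrapper name is illustrative and does not exist in the driver.

	/* Illustrative wrapper, not part of the driver: the old boolean
	 * semantics expressed with the new getter. */
	static bool ttm_has_userptr(struct ttm_tt *ttm)
	{
		return amdgpu_ttm_tt_get_usermm(ttm) != NULL;
	}
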