Commit 4458da0b authored by Christian König, committed by Alex Deucher

drm/amdgpu: fix userptr HMM range handling v2

The basic problem here is that it's not allowed to page fault while
holding the reservation lock.

So it can happen that multiple processes try to validate a userptr
at the same time.

Work around that by putting the HMM range object into the
mutex-protected bo list for now.

v2: make sure the range is set to NULL in case of an error
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
CC: stable@vger.kernel.org
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent b39df63b
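
For context: the patch moves the hmm_range out of struct amdgpu_ttm_tt (where a single gtt->range field raced between concurrent validators) and instead hands it back to the caller, which keeps it in the mutex-protected bo list entry. Below is a minimal sketch of the resulting calling convention; the surrounding error handling is illustrative only, not copied from any one call site in the patch:

	struct hmm_range *range;
	int r;

	/* Fills ttm->pages and returns the HMM range to the caller;
	 * on failure *range is guaranteed to be NULL (the v2 fix). */
	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (r)
		return r;

	/* ... validate and map the user pages ... */

	/* Must be called exactly once to stop HMM tracking; returns
	 * true if the pages are still valid. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;	/* pages were invalidated, retry */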
@@ -986,6 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	struct amdkfd_process_info *process_info = mem->process_info;
 	struct amdgpu_bo *bo = mem->bo;
 	struct ttm_operation_ctx ctx = { true, false };
+	struct hmm_range *range;
 	int ret = 0;
 
 	mutex_lock(&process_info->lock);
@@ -1015,7 +1016,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 		return 0;
 	}
 
-	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+	ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
 	if (ret) {
 		pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
 		goto unregister_out;
@@ -1033,7 +1034,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr,
 	amdgpu_bo_unreserve(bo);
 
 release_out:
-	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 unregister_out:
 	if (ret)
 		amdgpu_mn_unregister(bo);
@@ -2370,6 +2371,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 	/* Go through userptr_inval_list and update any invalid user_pages */
 	list_for_each_entry(mem, &process_info->userptr_inval_list,
 			    validate_list.head) {
+		struct hmm_range *range;
+
 		invalid = atomic_read(&mem->invalid);
 		if (!invalid)
 			/* BO hasn't been invalidated since the last
@@ -2380,7 +2383,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		bo = mem->bo;
 
 		/* Get updated user pages */
-		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+		ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+						   &range);
 		if (ret) {
 			pr_debug("Failed %d to get user pages\n", ret);
@@ -2399,7 +2403,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
 		 * FIXME: Cannot ignore the return code, must hold
 		 * notifier_lock
 		 */
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 	}
 
 	/* Mark the BO as valid unless it was invalidated
...
@@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
 		list_add_tail(&e->tv.head, &bucket[priority]);
 		e->user_pages = NULL;
+		e->range = NULL;
 	}
 
 	/* Connect the sorted buckets in the output list. */
...
@@ -26,6 +26,8 @@
 #include <drm/ttm/ttm_execbuf_util.h>
 #include <drm/amdgpu_drm.h>
 
+struct hmm_range;
+
 struct amdgpu_device;
 struct amdgpu_bo;
 struct amdgpu_bo_va;
@@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry {
 	struct amdgpu_bo_va	*bo_va;
 	uint32_t		priority;
 	struct page		**user_pages;
+	struct hmm_range	*range;
 	bool			user_invalidated;
 };
...
@@ -913,7 +913,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 			goto out_free_user_pages;
 		}
 
-		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
+		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
 		if (r) {
 			kvfree(e->user_pages);
 			e->user_pages = NULL;
@@ -991,9 +991,10 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		if (!e->user_pages)
 			continue;
 
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
 		kvfree(e->user_pages);
 		e->user_pages = NULL;
+		e->range = NULL;
 	}
 	mutex_unlock(&p->bo_list->bo_list_mutex);
 	return r;
@@ -1273,7 +1274,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
 
-		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
+		e->range = NULL;
 	}
 	if (r) {
 		r = -EAGAIN;
...
@@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
+	struct hmm_range *range;
 	struct amdgpu_bo *bo;
 	uint32_t handle;
 	int r;
@@ -418,7 +419,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 		goto release_object;
 
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
-		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
+		r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages,
+						 &range);
 		if (r)
 			goto release_object;
@@ -441,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 user_pages_done:
 	if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE)
-		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);
+		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range);
 
 release_object:
 	drm_gem_object_put(gobj);
...
@@ -643,9 +643,6 @@ struct amdgpu_ttm_tt {
 	struct task_struct	*usertask;
 	uint32_t		userflags;
 	bool			bound;
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	struct hmm_range	*range;
-#endif
 };
 
 #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm)
@@ -658,7 +655,8 @@ struct amdgpu_ttm_tt {
  * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only
  * once afterwards to stop HMM tracking
  */
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+				 struct hmm_range **range)
 {
 	struct ttm_tt *ttm = bo->tbo.ttm;
 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
@@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	bool readonly;
 	int r = 0;
 
+	/* Make sure get_user_pages_done() can cleanup gracefully */
+	*range = NULL;
+
 	mm = bo->notifier.mm;
 	if (unlikely(!mm)) {
 		DRM_DEBUG_DRIVER("BO is not registered?\n");
 		return -EFAULT;
 	}
 
-	/* Another get_user_pages is running at the same time?? */
-	if (WARN_ON(gtt->range))
-		return -EFAULT;
-
 	if (!mmget_not_zero(mm)) /* Happens during process shutdown */
 		return -ESRCH;
@@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	readonly = amdgpu_ttm_tt_is_readonly(ttm);
 	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
-				       ttm->num_pages, &gtt->range, readonly,
+				       ttm->num_pages, range, readonly,
 				       true, NULL);
 out_unlock:
 	mmap_read_unlock(mm);
@@ -713,30 +710,24 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
  *
  * Returns: true if pages are still valid
  */
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+				       struct hmm_range *range)
 {
 	struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm);
-	bool r = false;
 
-	if (!gtt || !gtt->userptr)
+	if (!gtt || !gtt->userptr || !range)
 		return false;
 
 	DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n",
 			 gtt->userptr, ttm->num_pages);
 
-	WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns,
-		  "No user pages to check\n");
+	WARN_ONCE(!range->hmm_pfns, "No user pages to check\n");
 
-	if (gtt->range) {
-		/*
-		 * FIXME: Must always hold notifier_lock for this, and must
-		 * not ignore the return code.
-		 */
-		r = amdgpu_hmm_range_get_pages_done(gtt->range);
-		gtt->range = NULL;
-	}
-
-	return !r;
+	/*
+	 * FIXME: Must always hold notifier_lock for this, and must
+	 * not ignore the return code.
+	 */
+	return !amdgpu_hmm_range_get_pages_done(range);
 }
 #endif
@@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
 	/* unmap the pages mapped to the device */
 	dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0);
 	sg_free_table(ttm->sg);
-
-#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	if (gtt->range) {
-		unsigned long i;
-
-		for (i = 0; i < ttm->num_pages; i++) {
-			if (ttm->pages[i] !=
-			    hmm_pfn_to_page(gtt->range->hmm_pfns[i]))
-				break;
-		}
-
-		WARN((i == ttm->num_pages), "Missing get_user_page_done\n");
-	}
-#endif
 }
 
 static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
...
@@ -39,6 +39,8 @@
 #define AMDGPU_POISON	0xd0bed0be
 
+struct hmm_range;
+
 struct amdgpu_gtt_mgr {
 	struct ttm_resource_manager manager;
 	struct drm_mm mm;
@@ -149,15 +151,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
 uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
-bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
+int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages,
+				 struct hmm_range **range);
+bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+				       struct hmm_range *range);
 #else
 static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
-					       struct page **pages)
+					       struct page **pages,
+					       struct hmm_range **range)
 {
 	return -EPERM;
 }
-static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
+static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm,
+						     struct hmm_range *range)
 {
 	return false;
 }
...