Commit e5eaa7cc authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: Prepare for hmm_range_register API change (v2)

An upcoming change in the hmm_range_register API requires passing in
a pointer to an hmm_mirror instead of mm_struct. To access the
hmm_mirror we need pass bo instead of ttm to amdgpu_ttm_tt_get_user_pages
because mirror is part of amdgpu_mn structure, which is accessible from bo.

v2: fix building without CONFIG_HMM_MIRROR (Arnd)
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 3e2bb60a
...@@ -504,7 +504,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm, ...@@ -504,7 +504,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
goto out; goto out;
} }
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, bo->tbo.ttm->pages); ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
if (ret) { if (ret) {
pr_err("%s: Failed to get user pages: %d\n", __func__, ret); pr_err("%s: Failed to get user pages: %d\n", __func__, ret);
goto unregister_out; goto unregister_out;
...@@ -1729,8 +1729,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, ...@@ -1729,8 +1729,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
bo = mem->bo; bo = mem->bo;
/* Get updated user pages */ /* Get updated user pages */
ret = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
bo->tbo.ttm->pages);
if (ret) { if (ret) {
pr_debug("%s: Failed to get user pages: %d\n", pr_debug("%s: Failed to get user pages: %d\n",
__func__, ret); __func__, ret);
......
...@@ -633,7 +633,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, ...@@ -633,7 +633,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
return -ENOMEM; return -ENOMEM;
} }
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, e->user_pages); r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages);
if (r) { if (r) {
kvfree(e->user_pages); kvfree(e->user_pages);
e->user_pages = NULL; e->user_pages = NULL;
......
...@@ -327,8 +327,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, ...@@ -327,8 +327,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
} }
if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) {
r = amdgpu_ttm_tt_get_user_pages(bo->tbo.ttm, r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages);
bo->tbo.ttm->pages);
if (r) if (r)
goto release_object; goto release_object;
......
...@@ -45,48 +45,11 @@ ...@@ -45,48 +45,11 @@
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/hmm.h>
#include <linux/interval_tree.h>
#include <drm/drm.h> #include <drm/drm.h>
#include "amdgpu.h" #include "amdgpu.h"
#include "amdgpu_amdkfd.h" #include "amdgpu_amdkfd.h"
/**
* struct amdgpu_mn
*
* @adev: amdgpu device pointer
* @mm: process address space
* @type: type of MMU notifier
* @work: destruction work item
* @node: hash table node to find structure by adev and mn
* @lock: rw semaphore protecting the notifier nodes
* @objects: interval tree containing amdgpu_mn_nodes
* @mirror: HMM mirror function support
*
* Data for each amdgpu device and process address space.
*/
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
enum amdgpu_mn_type type;
/* only used on destruction */
struct work_struct work;
/* protected by adev->mn_lock */
struct hlist_node node;
/* objects protected by lock */
struct rw_semaphore lock;
struct rb_root_cached objects;
/* HMM mirror */
struct hmm_mirror mirror;
};
/** /**
* struct amdgpu_mn_node * struct amdgpu_mn_node
* *
......
...@@ -24,17 +24,53 @@ ...@@ -24,17 +24,53 @@
#ifndef __AMDGPU_MN_H__ #ifndef __AMDGPU_MN_H__
#define __AMDGPU_MN_H__ #define __AMDGPU_MN_H__
/* #include <linux/types.h>
* HMM mirror #include <linux/hmm.h>
*/ #include <linux/rwsem.h>
struct amdgpu_mn; #include <linux/workqueue.h>
struct hmm_range; #include <linux/interval_tree.h>
enum amdgpu_mn_type { enum amdgpu_mn_type {
AMDGPU_MN_TYPE_GFX, AMDGPU_MN_TYPE_GFX,
AMDGPU_MN_TYPE_HSA, AMDGPU_MN_TYPE_HSA,
}; };
/**
* struct amdgpu_mn
*
* @adev: amdgpu device pointer
* @mm: process address space
* @type: type of MMU notifier
* @work: destruction work item
* @node: hash table node to find structure by adev and mn
* @lock: rw semaphore protecting the notifier nodes
* @objects: interval tree containing amdgpu_mn_nodes
* @mirror: HMM mirror function support
*
* Data for each amdgpu device and process address space.
*/
struct amdgpu_mn {
/* constant after initialisation */
struct amdgpu_device *adev;
struct mm_struct *mm;
enum amdgpu_mn_type type;
/* only used on destruction */
struct work_struct work;
/* protected by adev->mn_lock */
struct hlist_node node;
/* objects protected by lock */
struct rw_semaphore lock;
struct rb_root_cached objects;
#ifdef CONFIG_HMM_MIRROR
/* HMM mirror */
struct hmm_mirror mirror;
#endif
};
#if defined(CONFIG_HMM_MIRROR) #if defined(CONFIG_HMM_MIRROR)
void amdgpu_mn_lock(struct amdgpu_mn *mn); void amdgpu_mn_lock(struct amdgpu_mn *mn);
void amdgpu_mn_unlock(struct amdgpu_mn *mn); void amdgpu_mn_unlock(struct amdgpu_mn *mn);
......
...@@ -731,8 +731,10 @@ struct amdgpu_ttm_tt { ...@@ -731,8 +731,10 @@ struct amdgpu_ttm_tt {
#define MAX_RETRY_HMM_RANGE_FAULT 16 #define MAX_RETRY_HMM_RANGE_FAULT 16
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
{ {
struct hmm_mirror *mirror = bo->mn ? &bo->mn->mirror : NULL;
struct ttm_tt *ttm = bo->tbo.ttm;
struct amdgpu_ttm_tt *gtt = (void *)ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm;
struct mm_struct *mm = gtt->usertask->mm; struct mm_struct *mm = gtt->usertask->mm;
unsigned long start = gtt->userptr; unsigned long start = gtt->userptr;
...@@ -746,6 +748,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) ...@@ -746,6 +748,12 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
if (!mm) /* Happens during process shutdown */ if (!mm) /* Happens during process shutdown */
return -ESRCH; return -ESRCH;
if (unlikely(!mirror)) {
DRM_DEBUG_DRIVER("Failed to get hmm_mirror\n");
r = -EFAULT;
goto out;
}
vma = find_vma(mm, start); vma = find_vma(mm, start);
if (unlikely(!vma || start < vma->vm_start)) { if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT; r = -EFAULT;
......
...@@ -102,10 +102,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); ...@@ -102,10 +102,11 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo);
#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages); int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm); bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm);
#else #else
static inline int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages) static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
struct page **pages)
{ {
return -EPERM; return -EPERM;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment