Commit 04d8d73d authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: add common HMM get pages function

Move the HMM get pages function from amdgpu_ttm to amdgpu_mn. This
common function will be used by new svm APIs.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c5e2e478
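
The two new helpers form a get/done pair: amdgpu_hmm_range_get_pages() faults the user pages and snapshots their pfns, and amdgpu_hmm_range_get_pages_done() later checks that snapshot against concurrent invalidations and frees it. A minimal caller sketch follows, assuming a notifier already registered for [start, start + npages * PAGE_SIZE); svm_example() and its retry policy are illustrative, not part of this patch:

/* Hypothetical caller (not part of this patch): svm_example() and the
 * -EAGAIN retry policy are placeholders for the future SVM users this
 * commit prepares for. Assumes amdgpu_mn.h for the helper prototypes.
 */
static int svm_example(struct mmu_interval_notifier *notifier,
		       struct mm_struct *mm, uint64_t start, uint64_t npages)
{
	struct page **pages;
	struct hmm_range *range;
	int r;

	pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	/* Fault in writable pages. mmap_locked=false, so the helper takes
	 * the mmap read lock around hmm_range_fault() itself.
	 */
	r = amdgpu_hmm_range_get_pages(notifier, mm, pages, start, npages,
				       &range, false, false);
	if (r)
		goto out;

	/* ... build the GPU mapping from pages[] under the driver's
	 * notifier lock ...
	 */

	/* Nonzero means an invalidation raced with us and the whole
	 * sequence must be retried; the helper frees the range either way.
	 */
	r = amdgpu_hmm_range_get_pages_done(range) ? -EAGAIN : 0;
out:
	kvfree(pages);
	return r;
}

Callers that already hold the mmap lock would instead pass mmap_locked=true, which makes the helper skip its own mmap_read_lock()/mmap_read_unlock().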
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -155,3 +155,86 @@ void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 	mmu_interval_notifier_remove(&bo->notifier);
 	bo->notifier.mm = NULL;
 }
+
+int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
+			       struct mm_struct *mm, struct page **pages,
+			       uint64_t start, uint64_t npages,
+			       struct hmm_range **phmm_range, bool readonly,
+			       bool mmap_locked)
+{
+	struct hmm_range *hmm_range;
+	unsigned long timeout;
+	unsigned long i;
+	unsigned long *pfns;
+	int r = 0;
+
+	hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL);
+	if (unlikely(!hmm_range))
+		return -ENOMEM;
+
+	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
+	if (unlikely(!pfns)) {
+		r = -ENOMEM;
+		goto out_free_range;
+	}
+
+	hmm_range->notifier = notifier;
+	hmm_range->default_flags = HMM_PFN_REQ_FAULT;
+	if (!readonly)
+		hmm_range->default_flags |= HMM_PFN_REQ_WRITE;
+	hmm_range->hmm_pfns = pfns;
+	hmm_range->start = start;
+	hmm_range->end = start + npages * PAGE_SIZE;
+
+	timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
+
+retry:
+	hmm_range->notifier_seq = mmu_interval_read_begin(notifier);
+
+	if (likely(!mmap_locked))
+		mmap_read_lock(mm);
+
+	r = hmm_range_fault(hmm_range);
+
+	if (likely(!mmap_locked))
+		mmap_read_unlock(mm);
+
+	if (unlikely(r)) {
+		/*
+		 * FIXME: This timeout should encompass the retry from
+		 * mmu_interval_read_retry() as well.
+		 */
+		if (r == -EBUSY && !time_after(jiffies, timeout))
+			goto retry;
+		goto out_free_pfns;
+	}
+
+	/*
+	 * Due to default_flags, all pages are HMM_PFN_VALID or
+	 * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
+	 * the notifier_lock, and mmu_interval_read_retry() must be done first.
+	 */
+	for (i = 0; pages && i < npages; i++)
+		pages[i] = hmm_pfn_to_page(pfns[i]);
+	*phmm_range = hmm_range;
+
+	return 0;
+
+out_free_pfns:
+	kvfree(pfns);
+out_free_range:
+	kfree(hmm_range);
+
+	return r;
+}
+
+int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range)
+{
+	int r;
+
+	r = mmu_interval_read_retry(hmm_range->notifier,
+				    hmm_range->notifier_seq);
+	kvfree(hmm_range->hmm_pfns);
+	kfree(hmm_range);
+
+	return r;
+}
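
The FIXME comments in amdgpu_hmm_range_get_pages() describe the usual HMM collision-retry rule: the pfn snapshot may only be committed after mmu_interval_read_retry() confirms, under the driver's notifier lock, that no invalidation raced with the fault. A hedged sketch of that pattern, mirroring Documentation/vm/hmm.rst; driver_lock, commit_mapping(), and the surrounding function are placeholder names:

/* Illustrative only (not part of this patch). driver_lock stands in for
 * the driver's notifier lock; commit_mapping() is whatever publishes the
 * pages to the GPU page tables.
 */
static DEFINE_MUTEX(driver_lock);

static int example_map_range(struct mmu_interval_notifier *notifier,
			     struct mm_struct *mm, struct page **pages,
			     uint64_t start, uint64_t npages, bool readonly)
{
	struct hmm_range *range;
	int r;

retry:
	r = amdgpu_hmm_range_get_pages(notifier, mm, pages, start, npages,
				       &range, readonly, false);
	if (r)
		return r;

	mutex_lock(&driver_lock);
	if (mmu_interval_read_retry(range->notifier, range->notifier_seq)) {
		/* An invalidation raced with the fault: discard and retry */
		mutex_unlock(&driver_lock);
		kvfree(range->hmm_pfns);
		kfree(range);
		goto retry;
	}
	commit_mapping(pages, npages);	/* snapshot is stable under the lock */
	mutex_unlock(&driver_lock);

	kvfree(range->hmm_pfns);
	kfree(range);
	return 0;
}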
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
@@ -30,6 +30,13 @@
 #include <linux/workqueue.h>
 #include <linux/interval_tree.h>
 
+int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
+			       struct mm_struct *mm, struct page **pages,
+			       uint64_t start, uint64_t npages,
+			       struct hmm_range **phmm_range, bool readonly,
+			       bool mmap_locked);
+int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
+
 #if defined(CONFIG_HMM_MIRROR)
 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -32,7 +32,6 @@
 
 #include <linux/dma-mapping.h>
 #include <linux/iommu.h>
-#include <linux/hmm.h>
 #include <linux/pagemap.h>
 #include <linux/sched/task.h>
 #include <linux/sched/mm.h>
@@ -663,10 +662,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	unsigned long start = gtt->userptr;
 	struct vm_area_struct *vma;
-	struct hmm_range *range;
-	unsigned long timeout;
 	struct mm_struct *mm;
-	unsigned long i;
+	bool readonly;
 	int r = 0;
 
 	mm = bo->notifier.mm;
......@@ -682,76 +679,26 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
range = kzalloc(sizeof(*range), GFP_KERNEL);
if (unlikely(!range)) {
r = -ENOMEM;
goto out;
}
range->notifier = &bo->notifier;
range->start = bo->notifier.interval_tree.start;
range->end = bo->notifier.interval_tree.last + 1;
range->default_flags = HMM_PFN_REQ_FAULT;
if (!amdgpu_ttm_tt_is_readonly(ttm))
range->default_flags |= HMM_PFN_REQ_WRITE;
range->hmm_pfns = kvmalloc_array(ttm->num_pages,
sizeof(*range->hmm_pfns), GFP_KERNEL);
if (unlikely(!range->hmm_pfns)) {
r = -ENOMEM;
goto out_free_ranges;
}
mmap_read_lock(mm);
vma = find_vma(mm, start);
mmap_read_unlock(mm);
if (unlikely(!vma || start < vma->vm_start)) {
r = -EFAULT;
goto out_unlock;
goto out_putmm;
}
if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
vma->vm_file)) {
r = -EPERM;
goto out_unlock;
}
mmap_read_unlock(mm);
timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
retry:
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
mmap_read_lock(mm);
r = hmm_range_fault(range);
mmap_read_unlock(mm);
if (unlikely(r)) {
/*
* FIXME: This timeout should encompass the retry from
* mmu_interval_read_retry() as well.
*/
if (r == -EBUSY && !time_after(jiffies, timeout))
goto retry;
goto out_free_pfns;
goto out_putmm;
}
/*
* Due to default_flags, all pages are HMM_PFN_VALID or
* hmm_range_fault() fails. FIXME: The pages cannot be touched outside
* the notifier_lock, and mmu_interval_read_retry() must be done first.
*/
for (i = 0; i < ttm->num_pages; i++)
pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
gtt->range = range;
readonly = amdgpu_ttm_tt_is_readonly(ttm);
r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
ttm->num_pages, &gtt->range, readonly,
false);
out_putmm:
mmput(mm);
return 0;
out_unlock:
mmap_read_unlock(mm);
out_free_pfns:
kvfree(range->hmm_pfns);
out_free_ranges:
kfree(range);
out:
mmput(mm);
return r;
}
@@ -780,10 +727,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 		/*
 		 * FIXME: Must always hold notifier_lock for this, and must
		 * not ignore the return code.
 		 */
-		r = mmu_interval_read_retry(gtt->range->notifier,
-					    gtt->range->notifier_seq);
-		kvfree(gtt->range->hmm_pfns);
-		kfree(gtt->range);
+		r = amdgpu_hmm_range_get_pages_done(gtt->range);
 		gtt->range = NULL;
 	}