Commit 5aeaccca authored by Philip Yang, committed by Alex Deucher

drm/amdgpu: support userptr cross VMAs case with HMM

A userptr may cross two VMAs if a forked child process (which does not
call exec after fork) mallocs a buffer, frees it, and then mallocs a
larger buffer. The kernel then creates a new VMA adjacent to the old
VMA that was cloned from the parent process, so some pages of the
userptr are in the first VMA and the rest are in the second VMA.

HMM expects a range to cover only one VMA, so loop over all VMAs in the
address range and create multiple ranges to handle this case. See
is_mergeable_anon_vma in mm/mmap.c for details.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 386a68e7
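
As an illustration of the cross-VMA scenario the commit message describes, here is a minimal userspace sketch. It is not part of the patch, and it is allocator-dependent: whether the child's second, larger malloc actually grows the heap into a new VMA adjacent to the cloned one depends on the libc allocator and the sizes involved.

/* Hypothetical reproducer: after fork without exec, free() followed by
 * a larger malloc() in the child can leave the buffer spanning two
 * adjacent anonymous VMAs, because the cloned VMA's anon_vma differs
 * from the new VMA's and the two cannot be merged (see
 * is_mergeable_anon_vma() in mm/mmap.c). */
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/wait.h>

int main(void)
{
	char *buf = malloc(64 * 1024);

	memset(buf, 0, 64 * 1024);		/* populate pages in the parent */

	if (fork() == 0) {			/* child inherits cloned VMAs */
		free(buf);
		buf = malloc(128 * 1024);	/* may extend the freed range with a new VMA */
		memset(buf, 0, 128 * 1024);
		/* registering buf as a userptr BO here could span two VMAs */
		_exit(0);
	}
	wait(NULL);
	return 0;
}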
@@ -711,7 +711,8 @@ struct amdgpu_ttm_tt {
 	struct task_struct	*usertask;
 	uint32_t		userflags;
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	struct hmm_range	range;
+	struct hmm_range	*ranges;
+	int			nr_ranges;
 #endif
 };
 
@@ -723,62 +724,108 @@ struct amdgpu_ttm_tt {
  * once afterwards to stop HMM tracking
  */
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
+
+/* Support Userptr pages cross max 16 vmas */
+#define MAX_NR_VMAS	(16)
+
 int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	struct mm_struct *mm = gtt->usertask->mm;
-	unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
-	struct hmm_range *range = &gtt->range;
-	int r = 0, i;
+	unsigned long start = gtt->userptr;
+	unsigned long end = start + ttm->num_pages * PAGE_SIZE;
+	struct hmm_range *ranges;
+	struct vm_area_struct *vma = NULL, *vmas[MAX_NR_VMAS];
+	uint64_t *pfns, f;
+	int r = 0, i, nr_pages;
 
 	if (!mm) /* Happens during process shutdown */
 		return -ESRCH;
 
-	amdgpu_hmm_init_range(range);
-
 	down_read(&mm->mmap_sem);
 
-	range->vma = find_vma(mm, gtt->userptr);
-	if (!range_in_vma(range->vma, gtt->userptr, end))
-		r = -EFAULT;
-	else if ((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
-		range->vma->vm_file)
+	/* user pages may cross multiple VMAs */
+	gtt->nr_ranges = 0;
+	do {
+		unsigned long vm_start;
+
+		if (gtt->nr_ranges >= MAX_NR_VMAS) {
+			DRM_ERROR("Too many VMAs in userptr range\n");
+			r = -EFAULT;
+			goto out;
+		}
+
+		vm_start = vma ? vma->vm_end : start;
+		vma = find_vma(mm, vm_start);
+		if (unlikely(!vma || vm_start < vma->vm_start)) {
+			r = -EFAULT;
+			goto out;
+		}
+		vmas[gtt->nr_ranges++] = vma;
+	} while (end > vma->vm_end);
+
+	DRM_DEBUG_DRIVER("0x%lx nr_ranges %d pages 0x%lx\n",
+		start, gtt->nr_ranges, ttm->num_pages);
+
+	if (unlikely((gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) &&
+		vmas[0]->vm_file)) {
 		r = -EPERM;
-	if (r)
 		goto out;
+	}
 
-	range->pfns = kvmalloc_array(ttm->num_pages, sizeof(uint64_t),
-				     GFP_KERNEL);
-	if (range->pfns == NULL) {
+	ranges = kvmalloc_array(gtt->nr_ranges, sizeof(*ranges), GFP_KERNEL);
+	if (unlikely(!ranges)) {
 		r = -ENOMEM;
 		goto out;
 	}
-	range->start = gtt->userptr;
-	range->end = end;
 
-	range->pfns[0] = range->flags[HMM_PFN_VALID];
-	range->pfns[0] |= amdgpu_ttm_tt_is_readonly(ttm) ?
-				0 : range->flags[HMM_PFN_WRITE];
-	for (i = 1; i < ttm->num_pages; i++)
-		range->pfns[i] = range->pfns[0];
+	pfns = kvmalloc_array(ttm->num_pages, sizeof(*pfns), GFP_KERNEL);
+	if (unlikely(!pfns)) {
+		r = -ENOMEM;
+		goto out_free_ranges;
+	}
+
+	for (i = 0; i < gtt->nr_ranges; i++)
+		amdgpu_hmm_init_range(&ranges[i]);
+
+	f = ranges[0].flags[HMM_PFN_VALID];
+	f |= amdgpu_ttm_tt_is_readonly(ttm) ?
+		0 : ranges[0].flags[HMM_PFN_WRITE];
+	memset64(pfns, f, ttm->num_pages);
+
+	for (nr_pages = 0, i = 0; i < gtt->nr_ranges; i++) {
+		ranges[i].vma = vmas[i];
+		ranges[i].start = max(start, vmas[i]->vm_start);
+		ranges[i].end = min(end, vmas[i]->vm_end);
+		ranges[i].pfns = pfns + nr_pages;
+		nr_pages += (ranges[i].end - ranges[i].start) / PAGE_SIZE;
+
+		r = hmm_vma_fault(&ranges[i], true);
+		if (unlikely(r))
+			break;
+	}
 
-	/* This may trigger page table update */
-	r = hmm_vma_fault(range, true);
-	if (r)
-		goto out_free_pfns;
+	if (unlikely(r)) {
+		while (i--)
+			hmm_vma_range_done(&ranges[i]);
+
+		goto out_free_pfns;
+	}
 
 	up_read(&mm->mmap_sem);
 
 	for (i = 0; i < ttm->num_pages; i++)
-		pages[i] = hmm_pfn_to_page(range, range->pfns[i]);
+		pages[i] = hmm_pfn_to_page(&ranges[0], pfns[i]);
+	gtt->ranges = ranges;
 
 	return 0;
 
 out_free_pfns:
-	kvfree(range->pfns);
-	range->pfns = NULL;
+	kvfree(pfns);
+out_free_ranges:
+	kvfree(ranges);
 out:
 	up_read(&mm->mmap_sem);
 
 	return r;
 }
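
To make the range-slicing arithmetic in the hunk above concrete, here is a small standalone sketch (hypothetical addresses, not driver code) of how a single contiguous pfns array is partitioned across per-VMA ranges by clamping each range to its VMA and indexing pfns at the running page offset:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct vma { unsigned long vm_start, vm_end; };

static unsigned long umax(unsigned long a, unsigned long b) { return a > b ? a : b; }
static unsigned long umin(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	/* a userptr of 8 pages crossing two adjacent VMAs at 0x100000 */
	unsigned long start = 0xfc000, end = start + 8 * PAGE_SIZE;
	struct vma vmas[2] = {
		{ 0xf0000, 0x100000 },	/* VMA cloned from the parent */
		{ 0x100000, 0x140000 },	/* new VMA created after fork */
	};
	unsigned long nr_pages = 0;

	for (int i = 0; i < 2; i++) {
		unsigned long r_start = umax(start, vmas[i].vm_start);
		unsigned long r_end = umin(end, vmas[i].vm_end);

		printf("range %d: [0x%lx, 0x%lx) -> pfns[%lu..]\n",
		       i, r_start, r_end, nr_pages);
		nr_pages += (r_end - r_start) / PAGE_SIZE;
	}
	printf("total pages: %lu\n", nr_pages);	/* prints 8 */
	return 0;
}

Because nr_pages accumulates across iterations, the pfns array stays in userptr page order, which is what lets the hmm_pfn_to_page() loop above walk it linearly.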
@@ -792,15 +839,23 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
 {
 	struct amdgpu_ttm_tt *gtt = (void *)ttm;
 	bool r = false;
+	int i;
 
 	if (!gtt || !gtt->userptr)
 		return false;
 
-	WARN_ONCE(!gtt->range.pfns, "No user pages to check\n");
-	if (gtt->range.pfns) {
-		r = hmm_vma_range_done(&gtt->range);
-		kvfree(gtt->range.pfns);
-		gtt->range.pfns = NULL;
+	DRM_DEBUG_DRIVER("user_pages_done 0x%llx nr_ranges %d pages 0x%lx\n",
+		gtt->userptr, gtt->nr_ranges, ttm->num_pages);
+
+	WARN_ONCE(!gtt->ranges || !gtt->ranges[0].pfns,
+		"No user pages to check\n");
+
+	if (gtt->ranges) {
+		for (i = 0; i < gtt->nr_ranges; i++)
+			r |= hmm_vma_range_done(&gtt->ranges[i]);
+		kvfree(gtt->ranges[0].pfns);
+		kvfree(gtt->ranges);
+		gtt->ranges = NULL;
 	}
 
 	return r;
@@ -884,8 +939,9 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 	sg_free_table(ttm->sg);
 
 #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR)
-	if (gtt->range.pfns &&
-	    ttm->pages[0] == hmm_pfn_to_page(&gtt->range, gtt->range.pfns[0]))
+	if (gtt->ranges &&
+	    ttm->pages[0] == hmm_pfn_to_page(&gtt->ranges[0],
+					     gtt->ranges[0].pfns[0]))
 		WARN_ONCE(1, "Missing get_user_page_done\n");
 #endif
 }