Commit 7ef6b2d4 authored by Alex Sierra, committed by Alex Deucher

drm/amdkfd: remap unaligned svm ranges that have split

SVM ranges that were mapped with 2MB page table entries need to be
remapped after a split when the split point is not 2MB aligned.

[WHY]:
An unaligned split causes the 2MB page table entries to be split into
4KB PTEs.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent de009982
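As an illustration only (not part of the patch): the remap decision hinges on whether the new range boundary produced by a split lands on a 2MB boundary. Below is a minimal userspace-style sketch of that check, assuming the default SVM granularity of 9 (512 x 4KB pages = 2MB) and addresses expressed as page numbers, the way the SVM code stores prange->start/last; the helper names are hypothetical.

/* Hypothetical userspace sketch of the alignment test added to
 * svm_range_split_head()/svm_range_split_tail(). Assumes granularity 9
 * (512 x 4KB pages = 2MB) and addresses given as page numbers.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

/* Head split keeps [new_start, last]: remap if new_start sits inside a 2MB block. */
static bool head_split_needs_remap(uint64_t new_start, unsigned int granularity)
{
        return !IS_ALIGNED(new_start, 1UL << granularity);
}

/* Tail split keeps [start, new_last]: the page right after new_last must be
 * 2MB aligned, otherwise the boundary falls inside a 2MB block.
 */
static bool tail_split_needs_remap(uint64_t new_last, unsigned int granularity)
{
        return !IS_ALIGNED(new_last + 1, 1UL << granularity);
}

int main(void)
{
        unsigned int granularity = 9;   /* assumed default: 2MB worth of 4KB pages */

        printf("head split at page 0x200 -> remap=%d\n",
               head_split_needs_remap(0x200, granularity));    /* aligned: 0 */
        printf("head split at page 0x210 -> remap=%d\n",
               head_split_needs_remap(0x210, granularity));    /* unaligned: 1 */
        printf("tail ends at page 0x1ff  -> remap=%d\n",
               tail_split_needs_remap(0x1ff, granularity));    /* 0x200 is aligned: 0 */
        return 0;
}

For example, a split that makes a new head start at page 0x210 leaves the boundary in the middle of a 2MB block, so that range is put on remap_list; a split at page 0x200 does not.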
@@ -1106,26 +1106,32 @@ svm_range_split(struct svm_range *prange, uint64_t start, uint64_t last,
 }
 
 static int
-svm_range_split_tail(struct svm_range *prange,
-                     uint64_t new_last, struct list_head *insert_list)
+svm_range_split_tail(struct svm_range *prange, uint64_t new_last,
+                     struct list_head *insert_list, struct list_head *remap_list)
 {
         struct svm_range *tail;
         int r = svm_range_split(prange, prange->start, new_last, &tail);
 
-        if (!r)
+        if (!r) {
                 list_add(&tail->list, insert_list);
+                if (!IS_ALIGNED(new_last + 1, 1UL << prange->granularity))
+                        list_add(&tail->update_list, remap_list);
+        }
         return r;
 }
 
 static int
-svm_range_split_head(struct svm_range *prange,
-                     uint64_t new_start, struct list_head *insert_list)
+svm_range_split_head(struct svm_range *prange, uint64_t new_start,
+                     struct list_head *insert_list, struct list_head *remap_list)
 {
         struct svm_range *head;
         int r = svm_range_split(prange, new_start, prange->last, &head);
 
-        if (!r)
+        if (!r) {
                 list_add(&head->list, insert_list);
+                if (!IS_ALIGNED(new_start, 1UL << prange->granularity))
+                        list_add(&head->update_list, remap_list);
+        }
         return r;
 }
 
@@ -2052,7 +2058,7 @@ static int
 svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
               uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
               struct list_head *update_list, struct list_head *insert_list,
-              struct list_head *remove_list)
+              struct list_head *remove_list, struct list_head *remap_list)
 {
         unsigned long last = start + size - 1UL;
         struct svm_range_list *svms = &p->svms;
@@ -2068,6 +2074,7 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
         INIT_LIST_HEAD(insert_list);
         INIT_LIST_HEAD(remove_list);
         INIT_LIST_HEAD(&new_list);
+        INIT_LIST_HEAD(remap_list);
 
         node = interval_tree_iter_first(&svms->objects, start, last);
         while (node) {
@@ -2104,14 +2111,14 @@ svm_range_add(struct kfd_process *p, uint64_t start, uint64_t size,
                 if (node->start < start) {
                         pr_debug("change old range start\n");
                         r = svm_range_split_head(prange, start,
-                                                 insert_list);
+                                                 insert_list, remap_list);
                         if (r)
                                 goto out;
                 }
                 if (node->last > last) {
                         pr_debug("change old range last\n");
                         r = svm_range_split_tail(prange, last,
-                                                 insert_list);
+                                                 insert_list, remap_list);
                         if (r)
                                 goto out;
                 }
@@ -3501,6 +3508,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
         struct list_head update_list;
         struct list_head insert_list;
         struct list_head remove_list;
+        struct list_head remap_list;
         struct svm_range_list *svms;
         struct svm_range *prange;
         struct svm_range *next;
@@ -3532,7 +3540,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
 
         /* Add new range and split existing ranges as needed */
         r = svm_range_add(p, start, size, nattr, attrs, &update_list,
-                          &insert_list, &remove_list);
+                          &insert_list, &remove_list, &remap_list);
         if (r) {
                 mutex_unlock(&svms->lock);
                 mmap_write_unlock(mm);
@@ -3597,6 +3605,19 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
                         ret = r;
         }
 
+        list_for_each_entry(prange, &remap_list, update_list) {
+                pr_debug("Remapping prange 0x%p [0x%lx 0x%lx]\n",
+                         prange, prange->start, prange->last);
+                mutex_lock(&prange->migrate_mutex);
+                r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
+                                               true, true, prange->mapped_to_gpu);
+                if (r)
+                        pr_debug("failed %d on remap svm range\n", r);
+                mutex_unlock(&prange->migrate_mutex);
+                if (r)
+                        ret = r;
+        }
+
         dynamic_svm_range_dump(svms);
 
         mutex_unlock(&svms->lock);