Commit 601354f3 authored by Philip Yang, committed by Alex Deucher

drm/amdkfd: Update mapping if range attributes changed

Changing SVM range mapping flags or access attributes doesn't trigger
migration; if the range is already mapped on GPUs, we should update the
GPU mapping and pass flush_tlb as true to amdgpu vm.

Changing the SVM range preferred_loc or migration granularity doesn't
require a GPU mapping update, so skip the validate_and_map.
Signed-off-by: Philip Yang <Philip.Yang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 6b9c63a6
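
As a reader aid, here is a minimal standalone sketch of the decision logic this patch adds around svm_range_set_attr. The helper names are hypothetical and the code is simplified for illustration; it is not the kernel code itself, which follows in the diff below.

#include <stdbool.h>

/* Access attributes and mapping flags affect existing GPU mappings, so
 * changing them requires a revalidate/remap. preferred_loc and migration
 * granularity only steer future migration decisions, so they do not. */
static bool need_validate_and_map(bool migrated, bool update_mapping)
{
        return migrated || update_mapping;
}

/* Remapping a range that is already mapped on GPUs can leave stale
 * translations behind, so the page table update must flush GPU TLBs.
 * A migration already unmapped the range, so no extra flush is needed
 * when the mapping is rebuilt after one. */
static bool need_flush_tlb(bool migrated, bool update_mapping,
                           bool mapped_to_gpu)
{
        return !migrated && update_mapping && mapped_to_gpu;
}
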
@@ -685,7 +685,8 @@ svm_range_check_attr(struct kfd_process *p,
 
 static void
 svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
-                      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs)
+                      uint32_t nattr, struct kfd_ioctl_svm_attribute *attrs,
+                      bool *update_mapping)
 {
         uint32_t i;
         int gpuidx;
@@ -701,6 +702,7 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
                 case KFD_IOCTL_SVM_ATTR_ACCESS:
                 case KFD_IOCTL_SVM_ATTR_ACCESS_IN_PLACE:
                 case KFD_IOCTL_SVM_ATTR_NO_ACCESS:
+                        *update_mapping = true;
                         gpuidx = kfd_process_gpuidx_from_gpuid(p,
                                                                attrs[i].value);
                         if (attrs[i].type == KFD_IOCTL_SVM_ATTR_NO_ACCESS) {
@@ -715,9 +717,11 @@ svm_range_apply_attrs(struct kfd_process *p, struct svm_range *prange,
                         }
                         break;
                 case KFD_IOCTL_SVM_ATTR_SET_FLAGS:
+                        *update_mapping = true;
                         prange->flags |= attrs[i].value;
                         break;
                 case KFD_IOCTL_SVM_ATTR_CLR_FLAGS:
+                        *update_mapping = true;
                         prange->flags &= ~attrs[i].value;
                         break;
                 case KFD_IOCTL_SVM_ATTR_GRANULARITY:
@@ -1250,7 +1254,7 @@ static int
 svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
                      unsigned long offset, unsigned long npages, bool readonly,
                      dma_addr_t *dma_addr, struct amdgpu_device *bo_adev,
-                     struct dma_fence **fence)
+                     struct dma_fence **fence, bool flush_tlb)
 {
         struct amdgpu_device *adev = pdd->dev->adev;
         struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv);
@@ -1288,7 +1292,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
                          (last_domain == SVM_RANGE_VRAM_DOMAIN) ? 1 : 0,
                          pte_flags);
 
-                r = amdgpu_vm_update_range(adev, vm, false, false, false, NULL,
+                r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL,
                                            last_start, prange->start + i,
                                            pte_flags,
                                            last_start - prange->start,
@@ -1322,7 +1326,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange,
 static int
 svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
                       unsigned long npages, bool readonly,
-                      unsigned long *bitmap, bool wait)
+                      unsigned long *bitmap, bool wait, bool flush_tlb)
 {
         struct kfd_process_device *pdd;
         struct amdgpu_device *bo_adev;
@@ -1357,7 +1361,8 @@ svm_range_map_to_gpus(struct svm_range *prange, unsigned long offset,
 
                 r = svm_range_map_to_gpu(pdd, prange, offset, npages, readonly,
                                          prange->dma_addr[gpuidx],
-                                         bo_adev, wait ? &fence : NULL);
+                                         bo_adev, wait ? &fence : NULL,
+                                         flush_tlb);
                 if (r)
                         break;
 
@@ -1478,8 +1483,8 @@ static void *kfd_svm_page_owner(struct kfd_process *p, int32_t gpuidx)
  * 5. Release page table (and SVM BO) reservation
  */
 static int svm_range_validate_and_map(struct mm_struct *mm,
-                                      struct svm_range *prange,
-                                      int32_t gpuidx, bool intr, bool wait)
+                                      struct svm_range *prange, int32_t gpuidx,
+                                      bool intr, bool wait, bool flush_tlb)
 {
         struct svm_validate_context ctx;
         unsigned long start, end, addr;
@@ -1518,8 +1523,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                           prange->bitmap_aip, MAX_GPU_INSTANCE);
         }
 
-        if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE))
-                return 0;
+        if (bitmap_empty(ctx.bitmap, MAX_GPU_INSTANCE)) {
+                if (!prange->mapped_to_gpu)
+                        return 0;
+
+                bitmap_copy(ctx.bitmap, prange->bitmap_access, MAX_GPU_INSTANCE);
+        }
 
         if (prange->actual_loc && !prange->ttm_res) {
                 /* This should never happen. actual_loc gets set by
@@ -1591,7 +1600,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                 }
 
                 r = svm_range_map_to_gpus(prange, offset, npages, readonly,
-                                          ctx.bitmap, wait);
+                                          ctx.bitmap, wait, flush_tlb);
 
 unlock_out:
                 svm_range_unlock(prange);
@@ -1687,7 +1696,7 @@ static void svm_range_restore_work(struct work_struct *work)
                 mutex_lock(&prange->migrate_mutex);
 
                 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-                                               false, true);
+                                               false, true, false);
                 if (r)
                         pr_debug("failed %d to map 0x%lx to gpus\n", r,
                                  prange->start);
@@ -2825,7 +2834,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
                 }
         }
 
-        r = svm_range_validate_and_map(mm, prange, gpuidx, false, false);
+        r = svm_range_validate_and_map(mm, prange, gpuidx, false, false, false);
         if (r)
                 pr_debug("failed %d to map svms 0x%p [0x%lx 0x%lx] to gpus\n",
                          r, svms, prange->start, prange->last);
@@ -3238,6 +3247,8 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
         struct svm_range_list *svms;
         struct svm_range *prange;
         struct svm_range *next;
+        bool update_mapping = false;
+        bool flush_tlb;
         int r = 0;
 
         pr_debug("pasid 0x%x svms 0x%p [0x%llx 0x%llx] pages 0x%llx\n",
@@ -3276,7 +3287,7 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
                 svm_range_add_notifier_locked(mm, prange);
         }
         list_for_each_entry(prange, &update_list, update_list) {
-                svm_range_apply_attrs(p, prange, nattr, attrs);
+                svm_range_apply_attrs(p, prange, nattr, attrs, &update_mapping);
                 /* TODO: unmap ranges from GPU that lost access */
         }
         list_for_each_entry_safe(prange, next, &remove_list, update_list) {
@@ -3309,8 +3320,15 @@ svm_range_set_attr(struct kfd_process *p, struct mm_struct *mm,
                         continue;
                 }
 
+                if (!migrated && !update_mapping) {
+                        mutex_unlock(&prange->migrate_mutex);
+                        continue;
+                }
+
+                flush_tlb = !migrated && update_mapping && prange->mapped_to_gpu;
+
                 r = svm_range_validate_and_map(mm, prange, MAX_GPU_INSTANCE,
-                                               true, true);
+                                               true, true, flush_tlb);
                 if (r)
                         pr_debug("failed %d to map svm range\n", r);
 
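
For context, userspace changes SVM range attributes through the AMDKFD SVM ioctl, which reaches svm_range_set_attr above. A hedged usage sketch, assuming the installed UAPI header <linux/kfd_ioctl.h> (types and constants as defined there; error handling trimmed): with this patch, setting a mapping flag on an already-mapped range triggers a remap with a TLB flush instead of being a no-op.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Set the fine-grained-coherent flag on the SVM range [addr, addr + size). */
static int svm_set_coherent(int kfd_fd, void *addr, __u64 size)
{
        struct kfd_ioctl_svm_args *args;
        int r;

        /* One attribute entry follows the fixed-size argument header. */
        args = calloc(1, sizeof(*args) + sizeof(args->attrs[0]));
        if (!args)
                return -1;

        args->start_addr = (__u64)(unsigned long)addr;
        args->size = size;
        args->op = KFD_IOCTL_SVM_OP_SET_ATTR;
        args->nattr = 1;
        args->attrs[0].type = KFD_IOCTL_SVM_ATTR_SET_FLAGS;
        args->attrs[0].value = KFD_IOCTL_SVM_FLAG_COHERENT;

        r = ioctl(kfd_fd, AMDKFD_IOC_SVM, args);
        free(args);
        return r;
}

The file descriptor comes from open("/dev/kfd", O_RDWR); the same argument layout with op = KFD_IOCTL_SVM_OP_GET_ATTR reads attributes back.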