Commit ea53af8a authored by Alex Sierra, committed by Alex Deucher

drm/amdkfd: SVM API call to restore page tables

Use the SVM API to restore page tables when retry faults and
compute contexts are enabled.
Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 2383f56b
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include "amdgpu_gmc.h" #include "amdgpu_gmc.h"
#include "amdgpu_xgmi.h" #include "amdgpu_xgmi.h"
#include "amdgpu_dma_buf.h" #include "amdgpu_dma_buf.h"
#include "kfd_svm.h"
/** /**
* DOC: GPUVM * DOC: GPUVM
...@@ -3298,6 +3299,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) ...@@ -3298,6 +3299,7 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
uint64_t addr) uint64_t addr)
{ {
bool is_compute_context = false;
struct amdgpu_bo *root; struct amdgpu_bo *root;
uint64_t value, flags; uint64_t value, flags;
struct amdgpu_vm *vm; struct amdgpu_vm *vm;
...@@ -3305,15 +3307,25 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, ...@@ -3305,15 +3307,25 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
spin_lock(&adev->vm_manager.pasid_lock); spin_lock(&adev->vm_manager.pasid_lock);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid); vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm) if (vm) {
root = amdgpu_bo_ref(vm->root.base.bo); root = amdgpu_bo_ref(vm->root.base.bo);
else is_compute_context = vm->is_compute_context;
} else {
root = NULL; root = NULL;
}
spin_unlock(&adev->vm_manager.pasid_lock); spin_unlock(&adev->vm_manager.pasid_lock);
if (!root) if (!root)
return false; return false;
addr /= AMDGPU_GPU_PAGE_SIZE;
if (is_compute_context &&
!svm_range_restore_pages(adev, pasid, addr)) {
amdgpu_bo_unref(&root);
return true;
}
r = amdgpu_bo_reserve(root, true); r = amdgpu_bo_reserve(root, true);
if (r) if (r)
goto error_unref; goto error_unref;
...@@ -3327,18 +3339,16 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, ...@@ -3327,18 +3339,16 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,
if (!vm) if (!vm)
goto error_unlock; goto error_unlock;
addr /= AMDGPU_GPU_PAGE_SIZE;
flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED |
AMDGPU_PTE_SYSTEM; AMDGPU_PTE_SYSTEM;
if (vm->is_compute_context) { if (is_compute_context) {
/* Intentionally setting invalid PTE flag /* Intentionally setting invalid PTE flag
* combination to force a no-retry-fault * combination to force a no-retry-fault
*/ */
flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | flags = AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE |
AMDGPU_PTE_TF; AMDGPU_PTE_TF;
value = 0; value = 0;
} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { } else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {
/* Redirect the access to the dummy page */ /* Redirect the access to the dummy page */
value = adev->dummy_page_addr; value = adev->dummy_page_addr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment