Commit 80e709ee authored by Chong Li, committed by Alex Deucher

drm/amdgpu: add an option param to enforce process isolation between graphics and compute

Enforce process isolation between graphics and compute by making them share the same reserved VMID: when enforce_isolation is set, a reserved VMID is allocated for the gfxhub at init time and every gfxhub submission takes the reserved-VMID path in amdgpu_vmid_grab().

v2: remove the "struct amdgpu_vm *vm" parameter from
    amdgpu_vmid_alloc_reserved() and amdgpu_vmid_free_reserved().
Signed-off-by: Chong Li <chongli2@amd.com>
Reviewed-by: Christian Koenig <Christian.Koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 4e70da98
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -214,6 +214,7 @@ extern int amdgpu_force_asic_type;
 extern int amdgpu_smartshift_bias;
 extern int amdgpu_use_xgmi_p2p;
 extern int amdgpu_mtype_local;
+extern bool enforce_isolation;
 #ifdef CONFIG_HSA_AMD
 extern int sched_policy;
 extern bool debug_evictions;
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -153,7 +153,7 @@ uint amdgpu_pg_mask = 0xffffffff;
 uint amdgpu_sdma_phase_quantum = 32;
 char *amdgpu_disable_cu;
 char *amdgpu_virtual_display;
+bool enforce_isolation;
 /*
  * OverDrive(bit 14) disabled by default
  * GFX DCS(bit 19) disabled by default
@@ -973,6 +973,14 @@ MODULE_PARM_DESC(
         4 = AMDGPU_CPX_PARTITION_MODE)");
 module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
 
+/**
+ * DOC: enforce_isolation (bool)
+ * enforce process isolation between graphics and compute by using the same
+ * reserved vmid.
+ */
+module_param(enforce_isolation, bool, 0444);
+MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute. enforce_isolation = on");
+
 /* These devices are not supported by amdgpu.
  * They are supported by the mach64, r128, radeon drivers
  */
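Usage note: the parameter is registered with module_param() and exposed read-only (0444), so it is set at module load time rather than at runtime, typically as amdgpu.enforce_isolation=1 on the kernel command line or via "modprobe amdgpu enforce_isolation=1"; the active value can be read back from /sys/module/amdgpu/parameters/enforce_isolation.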
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
@@ -409,7 +409,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 	if (r || !idle)
 		goto error;
 
-	if (vm->reserved_vmid[vmhub]) {
+	if (vm->reserved_vmid[vmhub] || (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
 		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
 		if (r || !id)
 			goto error;
@@ -460,14 +460,11 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 }
 
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub])
-		goto unlock;
 
 	++id_mgr->reserved_use_count;
 	if (!id_mgr->reserved) {
@@ -479,27 +476,23 @@ int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
 		list_del_init(&id->list);
 		id_mgr->reserved = id;
 	}
-	vm->reserved_vmid[vmhub] = true;
 
-unlock:
 	mutex_unlock(&id_mgr->lock);
 	return 0;
 }
 
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
 			       unsigned vmhub)
 {
 	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
 
 	mutex_lock(&id_mgr->lock);
-	if (vm->reserved_vmid[vmhub] &&
-	    !--id_mgr->reserved_use_count) {
+	if (!--id_mgr->reserved_use_count) {
 		/* give the reserved ID back to normal round robin */
 		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
 		id_mgr->reserved = NULL;
 	}
-	vm->reserved_vmid[vmhub] = false;
 	mutex_unlock(&id_mgr->lock);
 }
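With the "struct amdgpu_vm *vm" parameter gone, amdgpu_vmid_alloc_reserved() and amdgpu_vmid_free_reserved() become plain refcounted get/put operations on the hub's single reserved ID (reserved_use_count); tracking whether a given VM already holds a reservation now falls to the callers, which is what the new fpriv->vm.reserved_vmid[] checks in amdgpu_vm_ioctl() and amdgpu_vm_fini() below take care of.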
@@ -578,6 +571,10 @@ void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
 			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
 		}
 	}
+
+	/* alloc a default reserved vmid to enforce isolation */
+	if (enforce_isolation)
+		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
 }
 
 /**
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
@@ -79,11 +79,9 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id);
 int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
-			       unsigned vmhub);
+			       unsigned vmhub);
 void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
-			       struct amdgpu_vm *vm,
-			       unsigned vmhub);
+			       unsigned vmhub);
 int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence);
 void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -2284,8 +2284,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	}
 
 	dma_fence_put(vm->last_update);
-	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
-		amdgpu_vmid_free_reserved(adev, vm, i);
+
+	for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) {
+		if (vm->reserved_vmid[i]) {
+			amdgpu_vmid_free_reserved(adev, i);
+			vm->reserved_vmid[i] = false;
+		}
+	}
 }
 
 /**
@@ -2368,7 +2374,6 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	union drm_amdgpu_vm *args = data;
 	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
-	int r;
 
 	/* No valid flags defined yet */
 	if (args->in.flags)
@@ -2377,13 +2382,17 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 	switch (args->in.op) {
 	case AMDGPU_VM_OP_RESERVE_VMID:
 		/* We only have requirement to reserve vmid from gfxhub */
-		r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm,
-					       AMDGPU_GFXHUB(0));
-		if (r)
-			return r;
+		if (!fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
+			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
+			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = true;
+		}
 		break;
 	case AMDGPU_VM_OP_UNRESERVE_VMID:
-		amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB(0));
+		if (fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)]) {
+			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
+			fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] = false;
+		}
 		break;
 	default:
 		return -EINVAL;
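The reserve/unreserve path above is driven from userspace through the AMDGPU_VM ioctl. A minimal caller sketch follows; it is illustrative only and not part of the patch, and assumes libdrm (xf86drm.h and amdgpu_drm.h on the libdrm include path, e.g. via pkg-config libdrm) plus an already-opened amdgpu DRM file descriptor.

/*
 * Illustrative only: reserve or unreserve the gfxhub VMID for the VM behind
 * drm_fd through the AMDGPU_VM ioctl handled by amdgpu_vm_ioctl() above.
 */
#include <stdbool.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

static int set_reserved_vmid(int drm_fd, bool reserve)
{
	union drm_amdgpu_vm args;

	memset(&args, 0, sizeof(args));
	args.in.op = reserve ? AMDGPU_VM_OP_RESERVE_VMID
			     : AMDGPU_VM_OP_UNRESERVE_VMID;
	args.in.flags = 0;	/* no flags are defined for this ioctl */

	/* DRM_AMDGPU_VM is the command index behind DRM_IOCTL_AMDGPU_VM */
	return drmCommandWriteRead(drm_fd, DRM_AMDGPU_VM, &args, sizeof(args));
}

Note that with this patch repeated RESERVE requests on the same file descriptor no longer bump the ID manager's reserved_use_count more than once, since amdgpu_vm_ioctl() only calls amdgpu_vmid_alloc_reserved() when fpriv->vm.reserved_vmid[AMDGPU_GFXHUB(0)] is not yet set.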