Commit 04e9c0ce authored by Matthew Brost

drm/xe: Add VM bind IOCTL error injection

Add VM bind IOCTL error injection, which steals the MSB of the bind
flags field; when that bit is set, errors are injected at various
points in the VM bind IOCTL. This is intended to validate error paths.
Enabled by CONFIG_DRM_XE_DEBUG.
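
For context, a userspace test could exercise this roughly as in the sketch
below. This is a hypothetical, untested sketch, not part of the patch or of
IGT: the struct and ioctl names are the xe uAPI ones from uapi/drm/xe_drm.h
as best recalled, all surrounding setup (device open, VM and BO creation) is
elided, and FORCE_OP_ERROR here mirrors the kernel's debug-only define
rather than any stable uAPI flag.

/*
 * Hypothetical sketch (untested, not from the patch): request error
 * injection by setting bit 31 of the per-op bind flags. Requires a
 * kernel built with CONFIG_DRM_XE_DEBUG; on a production build the bit
 * is not in SUPPORTED_FLAGS, so the IOCTL should be rejected as an
 * invalid flag instead.
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/xe_drm.h>

#define FORCE_OP_ERROR	(1u << 31)	/* mirrors the kernel debug define */

static int bind_once_with_injection(int fd, uint32_t vm_id,
				    uint32_t bo_handle, uint64_t addr,
				    uint64_t range)
{
	struct drm_xe_vm_bind args;

	memset(&args, 0, sizeof(args));
	args.vm_id = vm_id;
	args.num_binds = 1;
	args.bind.obj = bo_handle;
	args.bind.range = range;
	args.bind.addr = addr;
	args.bind.op = DRM_XE_VM_BIND_OP_MAP;
	args.bind.flags = FORCE_OP_ERROR;	/* steal the MSB */

	/* Expected to fail with ENOSPC at one of the injection points. */
	return ioctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
}

Calling this three times in a row walks the injected failure through all
three checkpoints (lock, prepare, run), since each flagged bind advances the
device's injection position modulo FORCE_OP_ERROR_COUNT.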

v4:
 - Change define layout (Jonathan)
Signed-off-by: Matthew Brost <matthew.brost@intel.com>
Reviewed-by: Jonathan Cavitt <jonathan.cavitt@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240704041652.272920-8-matthew.brost@intel.com
parent a708f650
@@ -23,6 +23,10 @@
 #include "xe_sriov_types.h"
 #include "xe_step_types.h"

+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#endif
+
 #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
 #include "soc/intel_pch.h"
 #include "intel_display_core.h"
@@ -477,6 +481,14 @@ struct xe_device {
 		int mode;
 	} wedged;

+#ifdef TEST_VM_OPS_ERROR
+	/**
+	 * @vm_inject_error_position: inject errors at different places in VM
+	 * bind IOCTL based on this value
+	 */
+	u8 vm_inject_error_position;
+#endif
+
 	/* private: */
 #if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
...
@@ -1860,6 +1860,12 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
 	xe_tile_assert(tile, pt_update_ops->current_op <=
 		       pt_update_ops->num_ops);

+#ifdef TEST_VM_OPS_ERROR
+	if (vops->inject_error &&
+	    vops->vm->xe->vm_inject_error_position == FORCE_OP_ERROR_PREPARE)
+		return -ENOSPC;
+#endif
+
 	return 0;
 }
@@ -2000,6 +2006,12 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
 		return dma_fence_get_stub();
 	}

+#ifdef TEST_VM_OPS_ERROR
+	if (vops->inject_error &&
+	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_RUN)
+		return ERR_PTR(-ENOSPC);
+#endif
+
 	if (pt_update_ops->needs_invalidation) {
 		ifence = kzalloc(sizeof(*ifence), GFP_KERNEL);
 		if (!ifence) {
...
@@ -2478,6 +2478,12 @@ static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
 			return err;
 	}

+#ifdef TEST_VM_OPS_ERROR
+	if (vops->inject_error &&
+	    vm->xe->vm_inject_error_position == FORCE_OP_ERROR_LOCK)
+		return -ENOSPC;
+#endif
+
 	return 0;
 }
@@ -2714,11 +2720,18 @@ static int vm_bind_ioctl_ops_execute(struct xe_vm *vm,
 	return err;
 }

-#define SUPPORTED_FLAGS \
+#define SUPPORTED_FLAGS_STUB \
 	(DRM_XE_VM_BIND_FLAG_READONLY | \
 	 DRM_XE_VM_BIND_FLAG_IMMEDIATE | \
 	 DRM_XE_VM_BIND_FLAG_NULL | \
 	 DRM_XE_VM_BIND_FLAG_DUMPABLE)
+
+#ifdef TEST_VM_OPS_ERROR
+#define SUPPORTED_FLAGS	(SUPPORTED_FLAGS_STUB | FORCE_OP_ERROR)
+#else
+#define SUPPORTED_FLAGS	SUPPORTED_FLAGS_STUB
+#endif
+
 #define XE_64K_PAGE_MASK 0xffffull
 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP)
@@ -3066,6 +3079,15 @@ int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 		err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
 		if (err)
 			goto unwind_ops;
+
+#ifdef TEST_VM_OPS_ERROR
+		if (flags & FORCE_OP_ERROR) {
+			vops.inject_error = true;
+			vm->xe->vm_inject_error_position =
+				(vm->xe->vm_inject_error_position + 1) %
+				FORCE_OP_ERROR_COUNT;
+		}
+#endif
 	}

 	/* Nothing to do */
...
@@ -23,6 +23,16 @@ struct xe_user_fence;
 struct xe_vm;
 struct xe_vm_pgtable_update_op;

+#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
+#define TEST_VM_OPS_ERROR
+#define FORCE_OP_ERROR		BIT(31)
+
+#define FORCE_OP_ERROR_LOCK	0
+#define FORCE_OP_ERROR_PREPARE	1
+#define FORCE_OP_ERROR_RUN	2
+#define FORCE_OP_ERROR_COUNT	3
+#endif
+
 #define XE_VMA_READ_ONLY	DRM_GPUVA_USERBITS
 #define XE_VMA_DESTROYED	(DRM_GPUVA_USERBITS << 1)
 #define XE_VMA_ATOMIC_PTE_BIT	(DRM_GPUVA_USERBITS << 2)
@@ -359,6 +369,10 @@ struct xe_vma_ops {
 	u32 num_syncs;
 	/** @pt_update_ops: page table update operations */
 	struct xe_vm_pgtable_update_ops pt_update_ops[XE_MAX_TILES_PER_DEVICE];
+#ifdef TEST_VM_OPS_ERROR
+	/** @inject_error: inject error to test error handling */
+	bool inject_error;
+#endif
 };

 #endif
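
Putting the pieces together: vm_inject_error_position lives on the device
and is advanced once per flagged bind, so repeated flagged binds cycle the
injected failure through the three checkpoints. Assuming the counter starts
at 0 and one bind op per IOCTL:
 - 1st flagged bind: position becomes 1 → xe_pt_update_ops_prepare() fails
   with -ENOSPC
 - 2nd flagged bind: position becomes 2 → xe_pt_update_ops_run() returns
   ERR_PTR(-ENOSPC)
 - 3rd flagged bind: position wraps to 0 → vm_bind_ioctl_ops_lock_and_prep()
   fails with -ENOSPC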