Commit 7224788f authored by Rodrigo Vivi

drm/xe: Kill XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS extension

This extension is currently unused and is not aligned with the
error handling on async VM_BIND. Let's remove it and, since it was
the only extension for vm_create, remove VM extension support
entirely.

v2: rebase on top of the removal of drm_xe_ext_exec_queue_set_property

Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Reviewed-by: Matthew Brost <matthew.brost@intel.com>
parent 5dc079d1
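For context, this is roughly how userspace would have attached an error capture address through the extension chain that this patch removes. It is a minimal sketch based only on the structs visible in the diff below (drm_xe_ext_set_property chained through drm_xe_vm_create.extensions); the ioctl macro, header include path, and flag usage are assumptions rather than something taken from this patch. After this commit, any non-zero extensions pointer on VM create is rejected with -EINVAL.

#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "xe_drm.h"	/* uAPI header; exact include path is an assumption */

/* Sketch of the removed flow: chain a single XE_VM_EXTENSION_SET_PROPERTY
 * extension setting XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS, as the
 * deleted vm_user_extensions()/vm_set_error_capture_address() expected. */
static int vm_create_with_error_capture(int fd, uint64_t capture_addr,
					uint32_t *vm_id)
{
	struct drm_xe_ext_set_property ext = {
		.base.name = XE_VM_EXTENSION_SET_PROPERTY,
		.base.next_extension = 0,	/* last (and only) extension */
		.property = XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS,
		.value = capture_addr,		/* user address the kernel wrote captures to */
	};
	struct drm_xe_vm_create create = {
		/* A non-zero extensions pointer fails with -EINVAL after this commit. */
		.extensions = (uintptr_t)&ext,
		/* The capture address required async bind ops to be enabled. */
		.flags = DRM_XE_VM_CREATE_ASYNC_BIND_OPS,
	};

	if (ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create))
		return -errno;
	*vm_id = create.vm_id;
	return 0;
}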
...@@ -1531,37 +1531,6 @@ static void flush_async_ops(struct xe_vm *vm)
flush_work(&vm->async_ops.work);
}
static void vm_error_capture(struct xe_vm *vm, int err,
u32 op, u64 addr, u64 size)
{
struct drm_xe_vm_bind_op_error_capture capture;
u64 __user *address =
u64_to_user_ptr(vm->async_ops.error_capture.addr);
bool in_kthread = !current->mm;
capture.error = err;
capture.op = op;
capture.addr = addr;
capture.size = size;
if (in_kthread) {
if (!mmget_not_zero(vm->async_ops.error_capture.mm))
goto mm_closed;
kthread_use_mm(vm->async_ops.error_capture.mm);
}
if (copy_to_user(address, &capture, sizeof(capture)))
drm_warn(&vm->xe->drm, "Copy to user failed");
if (in_kthread) {
kthread_unuse_mm(vm->async_ops.error_capture.mm);
mmput(vm->async_ops.error_capture.mm);
}
mm_closed:
wake_up_all(&vm->async_ops.error_capture.wq);
}
static void xe_vm_close(struct xe_vm *vm)
{
down_write(&vm->lock);
...@@ -2036,91 +2005,6 @@ static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma,
return 0;
}
static int vm_set_error_capture_address(struct xe_device *xe, struct xe_vm *vm,
u64 value)
{
if (XE_IOCTL_DBG(xe, !value))
return -EINVAL;
if (XE_IOCTL_DBG(xe, !(vm->flags & XE_VM_FLAG_ASYNC_BIND_OPS)))
return -EOPNOTSUPP;
if (XE_IOCTL_DBG(xe, vm->async_ops.error_capture.addr))
return -EOPNOTSUPP;
vm->async_ops.error_capture.mm = current->mm;
vm->async_ops.error_capture.addr = value;
init_waitqueue_head(&vm->async_ops.error_capture.wq);
return 0;
}
typedef int (*xe_vm_set_property_fn)(struct xe_device *xe, struct xe_vm *vm,
u64 value);
static const xe_vm_set_property_fn vm_set_property_funcs[] = {
[XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS] =
vm_set_error_capture_address,
};
static int vm_user_ext_set_property(struct xe_device *xe, struct xe_vm *vm,
u64 extension)
{
u64 __user *address = u64_to_user_ptr(extension);
struct drm_xe_ext_set_property ext;
int err;
err = __copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
if (XE_IOCTL_DBG(xe, ext.property >=
ARRAY_SIZE(vm_set_property_funcs)) ||
XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.reserved[0] || ext.reserved[1]))
return -EINVAL;
return vm_set_property_funcs[ext.property](xe, vm, ext.value);
}
typedef int (*xe_vm_user_extension_fn)(struct xe_device *xe, struct xe_vm *vm,
u64 extension);
static const xe_vm_set_property_fn vm_user_extension_funcs[] = {
[XE_VM_EXTENSION_SET_PROPERTY] = vm_user_ext_set_property,
};
#define MAX_USER_EXTENSIONS 16
static int vm_user_extensions(struct xe_device *xe, struct xe_vm *vm,
u64 extensions, int ext_number)
{
u64 __user *address = u64_to_user_ptr(extensions);
struct xe_user_extension ext;
int err;
if (XE_IOCTL_DBG(xe, ext_number >= MAX_USER_EXTENSIONS))
return -E2BIG;
err = __copy_from_user(&ext, address, sizeof(ext));
if (XE_IOCTL_DBG(xe, err))
return -EFAULT;
if (XE_IOCTL_DBG(xe, ext.pad) ||
XE_IOCTL_DBG(xe, ext.name >=
ARRAY_SIZE(vm_user_extension_funcs)))
return -EINVAL;
err = vm_user_extension_funcs[ext.name](xe, vm, extensions);
if (XE_IOCTL_DBG(xe, err))
return err;
if (ext.next_extension)
return vm_user_extensions(xe, vm, ext.next_extension,
++ext_number);
return 0;
}
#define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_SCRATCH_PAGE | \
DRM_XE_VM_CREATE_COMPUTE_MODE | \
DRM_XE_VM_CREATE_ASYNC_BIND_OPS | \
...@@ -2138,6 +2022,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
int err;
u32 flags = 0;
if (XE_IOCTL_DBG(xe, args->extensions))
return -EINVAL;
if (XE_WA(xe_root_mmio_gt(xe), 14016763929))
args->flags |= DRM_XE_VM_CREATE_SCRATCH_PAGE;
...@@ -2180,14 +2067,6 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
if (IS_ERR(vm))
return PTR_ERR(vm);
if (args->extensions) {
err = vm_user_extensions(xe, vm, args->extensions, 0);
if (XE_IOCTL_DBG(xe, err)) {
xe_vm_close_and_put(vm);
return err;
}
}
mutex_lock(&xef->vm.lock);
err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
mutex_unlock(&xef->vm.lock);
...@@ -3087,8 +2966,6 @@ static void xe_vma_op_work_func(struct work_struct *w)
vm_set_async_error(vm, err);
up_write(&vm->lock);
if (vm->async_ops.error_capture.addr)
vm_error_capture(vm, err, 0, 0, 0);
break;
}
up_write(&vm->lock);
...
...@@ -552,23 +552,6 @@ struct drm_xe_gem_mmap_offset {
__u64 reserved[2];
};
/**
* struct drm_xe_vm_bind_op_error_capture - format of VM bind op error capture
*/
struct drm_xe_vm_bind_op_error_capture {
/** @error: errno that occurred */
__s32 error;
/** @op: operation that encounter an error */
__u32 op;
/** @addr: address of bind op */
__u64 addr;
/** @size: size of bind */
__u64 size;
};
/** struct drm_xe_ext_set_property - XE set property extension */
struct drm_xe_ext_set_property {
/** @base: base user extension */
...@@ -589,7 +572,6 @@ struct drm_xe_ext_set_property {
struct drm_xe_vm_create {
#define XE_VM_EXTENSION_SET_PROPERTY 0
#define XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS 0
/** @extensions: Pointer to the first extension struct, if any */
__u64 extensions;
...@@ -674,10 +656,7 @@ struct drm_xe_vm_bind_op {
* practice the bind op is good and will complete.
*
* If this flag is set and doesn't return an error, the bind op can
* still fail and recovery is needed. If configured, the bind op that
* caused the error will be captured in drm_xe_vm_bind_op_error_capture.
* Once the user sees the error (via a ufence +
* XE_VM_PROPERTY_BIND_OP_ERROR_CAPTURE_ADDRESS), it should free memory
* still fail and recovery is needed. It should free memory
* via non-async unbinds, and then restart all queued async binds op via
* XE_VM_BIND_OP_RESTART. Or alternatively the user should destroy the
* VM.
...
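The comment above also describes the recovery flow that survives this patch: on an async bind error, free memory with non-async unbinds, then either restart the queued async bind ops with XE_VM_BIND_OP_RESTART or destroy the VM. A hedged userspace sketch of that sequence follows; the wrapper functions are hypothetical stand-ins for individual DRM_IOCTL_XE_VM_BIND / DRM_IOCTL_XE_VM_DESTROY calls, not part of the uAPI.

#include <stdint.h>

/* Hypothetical wrappers, not uAPI: each issues one vm_bind or vm_destroy
 * ioctl as described in the uAPI comment above. */
int xe_vm_unbind_sync(int fd, uint32_t vm_id, uint64_t addr, uint64_t range);
int xe_vm_bind_restart(int fd, uint32_t vm_id);	/* XE_VM_BIND_OP_RESTART */
int xe_vm_destroy(int fd, uint32_t vm_id);

/* Recovery after an async bind op has reported an error. */
static int recover_async_bind_error(int fd, uint32_t vm_id,
				    uint64_t spare_addr, uint64_t spare_range)
{
	int err;

	/* 1) Free memory via non-async unbinds so the stalled op can succeed. */
	err = xe_vm_unbind_sync(fd, vm_id, spare_addr, spare_range);
	if (err)
		return err;

	/* 2) Restart all queued async bind ops... */
	err = xe_vm_bind_restart(fd, vm_id);
	if (!err)
		return 0;

	/* 3) ...or, alternatively, destroy the VM. */
	return xe_vm_destroy(fd, vm_id);
}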