Commit cbfbe47f authored by Emil Velikov, committed by Emil Velikov

drm/vmwgfx: use core drm to extend/check vmw_execbuf_ioctl

Currently vmw_execbuf_ioctl() open-codes the permission checking, size
extending and copying that is already done in core drm.

Kill all the duplication, adding a few comments for clarity.

Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Thomas Hellstrom <thellstrom@vmware.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Emil Velikov <emil.velikov@collabora.com>
Tested-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190522164119.24139-3-emil.l.velikov@gmail.com
parent bcde7d34
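For context, a simplified, userspace-compilable sketch of the size extension and zero-padding that core DRM's drm_ioctl() applies to every driver ioctl argument, which is what this patch stops open-coding in vmw_execbuf_ioctl(). The helper extend_and_copy_arg() and the malloc()/memcpy() calls are illustrative stand-ins for the in-kernel kmalloc()/copy_from_user() path, not the actual drm_ioctl() source:

#include <stdlib.h>
#include <string.h>

/*
 * Illustrative only: mimics how core DRM builds the kernel-side copy of an
 * ioctl argument before calling the driver.  Permission checks (DRM_AUTH /
 * DRM_RENDER_ALLOW) are handled separately, via drm_ioctl_permit(), before
 * the driver callback runs.
 */
static void *extend_and_copy_arg(const void *udata, size_t usize,
				 size_t drv_size)
{
	/* The kernel buffer covers both what userspace passed (_IOC_SIZE(cmd))
	 * and what the driver declared for the current struct version. */
	size_t ksize = usize > drv_size ? usize : drv_size;
	char *kdata = malloc(ksize);		/* kmalloc() in the kernel */

	if (!kdata)
		return NULL;

	memcpy(kdata, udata, usize);		/* copy_from_user() in the kernel */
	if (usize < ksize)
		/* Old userspace passed a shorter (older-version) struct:
		 * zero-pad the tail, which is why vmw_execbuf_ioctl() no
		 * longer needs its own size check and partial copy. */
		memset(kdata + usize, 0, ksize - usize);

	return kdata;	/* handed to the driver callback, e.g. vmw_execbuf_ioctl() */
}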
@@ -186,7 +186,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl,
 		      DRM_AUTH | DRM_RENDER_ALLOW),
-	VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH |
+	VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, DRM_AUTH |
 		      DRM_RENDER_ALLOW),
 	VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl,
 		      DRM_RENDER_ALLOW),
@@ -1117,15 +1117,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
 
 		if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) {
-			ret = (long) drm_ioctl_permit(ioctl->flags, file_priv);
-			if (unlikely(ret != 0))
-				return ret;
-
-			if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN))
-				goto out_io_encoding;
-
-			return (long) vmw_execbuf_ioctl(dev, arg, file_priv,
-							_IOC_SIZE(cmd));
+			return ioctl_func(filp, cmd, arg);
 		} else if (nr == DRM_COMMAND_BASE + DRM_VMW_UPDATE_LAYOUT) {
 			if (!drm_is_current_master(file_priv) &&
 			    !capable(CAP_SYS_ADMIN))
@@ -910,8 +910,8 @@ static inline struct page *vmw_piter_page(struct vmw_piter *viter)
  * Command submission - vmwgfx_execbuf.c
  */
 
-extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
-			     struct drm_file *file_priv, size_t size);
+extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
 extern int vmw_execbuf_process(struct drm_file *file_priv,
 			       struct vmw_private *dev_priv,
 			       void __user *user_commands,
@@ -3995,54 +3995,40 @@ void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
 }
 
-int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
-		      struct drm_file *file_priv, size_t size)
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_vmw_execbuf_arg arg;
+	struct drm_vmw_execbuf_arg *arg = data;
 	int ret;
-	static const size_t copy_offset[] = {
-		offsetof(struct drm_vmw_execbuf_arg, context_handle),
-		sizeof(struct drm_vmw_execbuf_arg)};
 	struct dma_fence *in_fence = NULL;
 
-	if (unlikely(size < copy_offset[0])) {
-		VMW_DEBUG_USER("Invalid command size, ioctl %d\n",
-			       DRM_VMW_EXECBUF);
-		return -EINVAL;
-	}
-
-	if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0)
-		return -EFAULT;
-
 	/*
 	 * Extend the ioctl argument while maintaining backwards compatibility:
-	 * We take different code paths depending on the value of arg.version.
+	 * We take different code paths depending on the value of arg->version.
+	 *
+	 * Note: The ioctl argument is extended and zeropadded by core DRM.
 	 */
-	if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION ||
-		     arg.version == 0)) {
+	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
+		     arg->version == 0)) {
 		VMW_DEBUG_USER("Incorrect execbuf version.\n");
 		return -EINVAL;
 	}
 
-	if (arg.version > 1 &&
-	    copy_from_user(&arg.context_handle,
-			   (void __user *) (data + copy_offset[0]),
-			   copy_offset[arg.version - 1] - copy_offset[0]) != 0)
-		return -EFAULT;
-
-	switch (arg.version) {
+	switch (arg->version) {
 	case 1:
-		arg.context_handle = (uint32_t) -1;
+		/* For v1 core DRM have extended + zeropadded the data */
+		arg->context_handle = (uint32_t) -1;
 		break;
 	case 2:
 	default:
+		/* For v2 and later core DRM would have correctly copied it */
 		break;
 	}
 
 	/* If imported a fence FD from elsewhere, then wait on it */
-	if (arg.flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
-		in_fence = sync_file_get_fence(arg.imported_fence_fd);
+	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
+		in_fence = sync_file_get_fence(arg->imported_fence_fd);
 		if (!in_fence) {
 			VMW_DEBUG_USER("Cannot get imported fence\n");
@@ -4059,11 +4045,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data,
 		return ret;
 
 	ret = vmw_execbuf_process(file_priv, dev_priv,
-				  (void __user *)(unsigned long)arg.commands,
-				  NULL, arg.command_size, arg.throttle_us,
-				  arg.context_handle,
-				  (void __user *)(unsigned long)arg.fence_rep,
-				  NULL, arg.flags);
+				  (void __user *)(unsigned long)arg->commands,
+				  NULL, arg->command_size, arg->throttle_us,
+				  arg->context_handle,
+				  (void __user *)(unsigned long)arg->fence_rep,
+				  NULL, arg->flags);
 
 	ttm_read_unlock(&dev_priv->reservation_sem);
 	if (unlikely(ret != 0))