Commit 9bbfda54 authored by Thomas Hellstrom

drm/vmwgfx: Kill unneeded legacy security features

At one point, the GPU command verifier and user-space handle manager
couldn't properly protect GPU clients from accessing each other's data.
Instead there was an elaborate mechanism to make sure only the active
master's primary clients could render. The other clients were either
put to sleep or even killed (if the master had exited). VRAM was
evicted on master switch. With the advent of render-node functionality,
we relaxed the VRAM eviction, but the other mechanisms stayed in place.

Now that the GPU command verifier and ttm object manager properly
isolate primary clients from different master realms, we can remove the
master-switch related code and drop those legacy features.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Brian Paul <brianp@vmware.com>
Reviewed-by: Deepak Rawat <drawat@vmware.com>
Acked-by: Emil Velikov <emil.velikov@collabora.com>
parent 358d76d9
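
The sleep-or-kill behavior described above hinged on a per-lock flag checked in every acquisition path: with the flag armed, a would-be taker is sent a signal instead of being granted the lock. As a rough userspace sketch of that pattern (pthreads and raise() standing in for the kernel's spinlock and send_sig(); the demo_ names are illustrative, not the driver's):

#include <pthread.h>
#include <signal.h>
#include <stdbool.h>

struct demo_lock {
        pthread_mutex_t mutex;  /* stands in for the spinlock lock->lock */
        int rw;                 /* >0: readers, -1: writer, 0: free */
        bool kill_takers;       /* armed when the owning master goes away */
        int signal;             /* e.g. SIGTERM */
};

/* Mirrors the fast path of the removed __ttm_read_lock(). */
static bool demo_read_trylock(struct demo_lock *l)
{
        bool locked = false;

        pthread_mutex_lock(&l->mutex);
        if (l->kill_takers) {
                /* kernel code did: send_sig(lock->signal, current, 0) */
                raise(l->signal);
                pthread_mutex_unlock(&l->mutex);
                return false;
        }
        if (l->rw >= 0) {
                l->rw++;
                locked = true;
        }
        pthread_mutex_unlock(&l->mutex);
        return locked;
}

The read, read-trylock, and write paths each repeated this check; that is the boilerplate the first hunks below delete.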
@@ -29,7 +29,6 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */

-#include <drm/ttm/ttm_module.h>
 #include <linux/atomic.h>
 #include <linux/errno.h>
 #include <linux/wait.h>
@@ -49,8 +48,6 @@ void ttm_lock_init(struct ttm_lock *lock)
         init_waitqueue_head(&lock->queue);
         lock->rw = 0;
         lock->flags = 0;
-        lock->kill_takers = false;
-        lock->signal = SIGKILL;
 }

 void ttm_read_unlock(struct ttm_lock *lock)
@@ -66,11 +63,6 @@ static bool __ttm_read_lock(struct ttm_lock *lock)
         bool locked = false;

         spin_lock(&lock->lock);
-        if (unlikely(lock->kill_takers)) {
-                send_sig(lock->signal, current, 0);
-                spin_unlock(&lock->lock);
-                return false;
-        }
         if (lock->rw >= 0 && lock->flags == 0) {
                 ++lock->rw;
                 locked = true;
@@ -98,11 +90,6 @@ static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
         *locked = false;

         spin_lock(&lock->lock);
-        if (unlikely(lock->kill_takers)) {
-                send_sig(lock->signal, current, 0);
-                spin_unlock(&lock->lock);
-                return false;
-        }
         if (lock->rw >= 0 && lock->flags == 0) {
                 ++lock->rw;
                 block = false;
@@ -147,11 +134,6 @@ static bool __ttm_write_lock(struct ttm_lock *lock)
         bool locked = false;

         spin_lock(&lock->lock);
-        if (unlikely(lock->kill_takers)) {
-                send_sig(lock->signal, current, 0);
-                spin_unlock(&lock->lock);
-                return false;
-        }
         if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
                 lock->rw = -1;
                 lock->flags &= ~TTM_WRITE_LOCK_PENDING;
@@ -182,88 +164,6 @@ int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
         return ret;
 }

-static int __ttm_vt_unlock(struct ttm_lock *lock)
-{
-        int ret = 0;
-
-        spin_lock(&lock->lock);
-        if (unlikely(!(lock->flags & TTM_VT_LOCK)))
-                ret = -EINVAL;
-        lock->flags &= ~TTM_VT_LOCK;
-        wake_up_all(&lock->queue);
-        spin_unlock(&lock->lock);
-
-        return ret;
-}
-
-static void ttm_vt_lock_remove(struct ttm_base_object **p_base)
-{
-        struct ttm_base_object *base = *p_base;
-        struct ttm_lock *lock = container_of(base, struct ttm_lock, base);
-        int ret;
-
-        *p_base = NULL;
-        ret = __ttm_vt_unlock(lock);
-        BUG_ON(ret != 0);
-}
-
-static bool __ttm_vt_lock(struct ttm_lock *lock)
-{
-        bool locked = false;
-
-        spin_lock(&lock->lock);
-        if (lock->rw == 0) {
-                lock->flags &= ~TTM_VT_LOCK_PENDING;
-                lock->flags |= TTM_VT_LOCK;
-                locked = true;
-        } else {
-                lock->flags |= TTM_VT_LOCK_PENDING;
-        }
-        spin_unlock(&lock->lock);
-
-        return locked;
-}
-
-int ttm_vt_lock(struct ttm_lock *lock,
-                bool interruptible,
-                struct ttm_object_file *tfile)
-{
-        int ret = 0;
-
-        if (interruptible) {
-                ret = wait_event_interruptible(lock->queue,
-                                               __ttm_vt_lock(lock));
-                if (unlikely(ret != 0)) {
-                        spin_lock(&lock->lock);
-                        lock->flags &= ~TTM_VT_LOCK_PENDING;
-                        wake_up_all(&lock->queue);
-                        spin_unlock(&lock->lock);
-                        return ret;
-                }
-        } else
-                wait_event(lock->queue, __ttm_vt_lock(lock));
-
-        /*
-         * Add a base-object, the destructor of which will
-         * make sure the lock is released if the client dies
-         * while holding it.
-         */
-        ret = ttm_base_object_init(tfile, &lock->base, false,
-                                   ttm_lock_type, &ttm_vt_lock_remove, NULL);
-        if (ret)
-                (void)__ttm_vt_unlock(lock);
-        else
-                lock->vt_holder = tfile;
-
-        return ret;
-}
-
-int ttm_vt_unlock(struct ttm_lock *lock)
-{
-        return ttm_ref_object_base_unref(lock->vt_holder,
-                                         lock->base.handle, TTM_REF_USAGE);
-}
-
 void ttm_suspend_unlock(struct ttm_lock *lock)
 {
         spin_lock(&lock->lock);
...
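Note how the removed ttm_vt_lock() tied lock ownership to object lifetime: it registered a TTM base object whose release hook (ttm_vt_lock_remove) unlocks even if the holder dies without ever calling ttm_vt_unlock(). The same idea with a plain kref, as a minimal sketch (the vt_guard type and names are hypothetical; the real code used ttm_base_object_init() and the tfile reference machinery):

#include <linux/kref.h>
#include <linux/slab.h>

/* Hypothetical guard object: dropping the last reference releases the lock. */
struct vt_guard {
        struct kref kref;
        struct ttm_lock *lock;
};

static void vt_guard_release(struct kref *kref)
{
        struct vt_guard *guard = container_of(kref, struct vt_guard, kref);

        /* Runs on explicit unlock or when the holder's file goes away,
         * so the lock cannot leak if the client dies while holding it. */
        __ttm_vt_unlock(guard->lock);
        kfree(guard);
}

/* Explicit unlock is then just kref_put(&guard->kref, vt_guard_release). */

With the master-switch model gone there is no holder left to guard against, so the object, its destructor, and the vt_holder bookkeeping can all go.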
@@ -63,8 +63,6 @@
  * @lock: Spinlock protecting some lock members.
  * @rw: Read-write lock counter. Protected by @lock.
  * @flags: Lock state. Protected by @lock.
- * @kill_takers: Boolean whether to kill takers of the lock.
- * @signal: Signal to send when kill_takers is true.
  */

 struct ttm_lock {
@@ -73,9 +71,6 @@ struct ttm_lock {
         spinlock_t lock;
         int32_t rw;
         uint32_t flags;
-        bool kill_takers;
-        int signal;
-        struct ttm_object_file *vt_holder;
 };
@@ -220,29 +215,4 @@ extern void ttm_write_unlock(struct ttm_lock *lock);
  */
 extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);

-/**
- * ttm_lock_set_kill
- *
- * @lock: Pointer to a struct ttm_lock
- * @val: Boolean whether to kill processes taking the lock.
- * @signal: Signal to send to the process taking the lock.
- *
- * The kill-when-taking-lock functionality is used to kill processes that keep
- * on using the TTM functionality when its resources has been taken down, for
- * example when the X server exits. A typical sequence would look like this:
- * - X server takes lock in write mode.
- * - ttm_lock_set_kill() is called with @val set to true.
- * - As part of X server exit, TTM resources are taken down.
- * - X server releases the lock on file release.
- * - Another dri client wants to render, takes the lock and is killed.
- *
- */
-static inline void ttm_lock_set_kill(struct ttm_lock *lock, bool val,
-                                     int signal)
-{
-        lock->kill_takers = val;
-        if (val)
-                lock->signal = signal;
-}
-
 #endif
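The sequence in the removed kernel-doc above maps directly onto the vmwgfx hunks that follow; condensed from the removed driver code:

/* Master exits while still holding the VT lock: arm the kill, then
 * release, so the next client taking the lock is signalled instead
 * (condensed from the removed vmw_postclose() below). */
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
ttm_vt_unlock(&vmaster->lock);

/* An incoming master disarms it before clients may render again
 * (condensed from the removed vmw_master_set() below). */
ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);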
@@ -254,7 +254,6 @@ static int vmw_restrict_dma_mask;
 static int vmw_assume_16bpp;

 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
-static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
                               void *ptr);
@@ -762,10 +761,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
         DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
                  dev_priv->mmio_start, dev_priv->mmio_size / 1024);

-        vmw_master_init(&dev_priv->fbdev_master);
-        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-        dev_priv->active_master = &dev_priv->fbdev_master;
-
         dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
                                        dev_priv->mmio_size, MEMREMAP_WB);
@@ -1012,18 +1007,7 @@ static void vmw_driver_unload(struct drm_device *dev)
 static void vmw_postclose(struct drm_device *dev,
                           struct drm_file *file_priv)
 {
-        struct vmw_fpriv *vmw_fp;
-
-        vmw_fp = vmw_fpriv(file_priv);
-        if (vmw_fp->locked_master) {
-                struct vmw_master *vmaster =
-                        vmw_master(vmw_fp->locked_master);
-
-                ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-                ttm_vt_unlock(&vmaster->lock);
-                drm_master_put(&vmw_fp->locked_master);
-        }
+        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);

         ttm_object_file_release(&vmw_fp->tfile);
         kfree(vmw_fp);
@@ -1052,55 +1036,6 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
         return ret;
 }

-static struct vmw_master *vmw_master_check(struct drm_device *dev,
-                                           struct drm_file *file_priv,
-                                           unsigned int flags)
-{
-        int ret;
-        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-        struct vmw_master *vmaster;
-
-        if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH))
-                return NULL;
-
-        ret = mutex_lock_interruptible(&dev->master_mutex);
-        if (unlikely(ret != 0))
-                return ERR_PTR(-ERESTARTSYS);
-
-        if (drm_is_current_master(file_priv)) {
-                mutex_unlock(&dev->master_mutex);
-                return NULL;
-        }
-
-        /*
-         * Check if we were previously master, but now dropped. In that
-         * case, allow at least render node functionality.
-         */
-        if (vmw_fp->locked_master) {
-                mutex_unlock(&dev->master_mutex);
-
-                if (flags & DRM_RENDER_ALLOW)
-                        return NULL;
-
-                DRM_ERROR("Dropped master trying to access ioctl that "
-                          "requires authentication.\n");
-                return ERR_PTR(-EACCES);
-        }
-        mutex_unlock(&dev->master_mutex);
-
-        /*
-         * Take the TTM lock. Possibly sleep waiting for the authenticating
-         * master to become master again, or for a SIGTERM if the
-         * authenticating master exits.
-         */
-        vmaster = vmw_master(file_priv->master);
-        ret = ttm_read_lock(&vmaster->lock, true);
-        if (unlikely(ret != 0))
-                vmaster = ERR_PTR(ret);
-
-        return vmaster;
-}
-
 static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg,
                               long (*ioctl_func)(struct file *, unsigned int,
@@ -1109,7 +1044,6 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
         struct drm_file *file_priv = filp->private_data;
         struct drm_device *dev = file_priv->minor->dev;
         unsigned int nr = DRM_IOCTL_NR(cmd);
-        struct vmw_master *vmaster;
         unsigned int flags;
         long ret;
@@ -1145,21 +1079,7 @@ static long vmw_generic_ioctl(struct file *filp, unsigned int cmd,
         } else if (!drm_ioctl_flags(nr, &flags))
                 return -EINVAL;

-        vmaster = vmw_master_check(dev, file_priv, flags);
-        if (IS_ERR(vmaster)) {
-                ret = PTR_ERR(vmaster);
-
-                if (ret != -ERESTARTSYS)
-                        DRM_INFO("IOCTL ERROR Command %d, Error %ld.\n",
-                                 nr, ret);
-                return ret;
-        }
-
-        ret = ioctl_func(filp, cmd, arg);
-        if (vmaster)
-                ttm_read_unlock(&vmaster->lock);
-
-        return ret;
+        return ioctl_func(filp, cmd, arg);

 out_io_encoding:
         DRM_ERROR("Invalid command format, ioctl %d\n",
@@ -1186,65 +1106,10 @@ static void vmw_lastclose(struct drm_device *dev)
 {
 }

-static void vmw_master_init(struct vmw_master *vmaster)
-{
-        ttm_lock_init(&vmaster->lock);
-}
-
-static int vmw_master_create(struct drm_device *dev,
-                             struct drm_master *master)
-{
-        struct vmw_master *vmaster;
-
-        vmaster = kzalloc(sizeof(*vmaster), GFP_KERNEL);
-        if (unlikely(!vmaster))
-                return -ENOMEM;
-
-        vmw_master_init(vmaster);
-        ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
-        master->driver_priv = vmaster;
-
-        return 0;
-}
-
-static void vmw_master_destroy(struct drm_device *dev,
-                               struct drm_master *master)
-{
-        struct vmw_master *vmaster = vmw_master(master);
-
-        master->driver_priv = NULL;
-        kfree(vmaster);
-}
-
 static int vmw_master_set(struct drm_device *dev,
                           struct drm_file *file_priv,
                           bool from_open)
 {
-        struct vmw_private *dev_priv = vmw_priv(dev);
-        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-        struct vmw_master *active = dev_priv->active_master;
-        struct vmw_master *vmaster = vmw_master(file_priv->master);
-        int ret = 0;
-
-        if (active) {
-                BUG_ON(active != &dev_priv->fbdev_master);
-                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
-                if (unlikely(ret != 0))
-                        return ret;
-
-                ttm_lock_set_kill(&active->lock, true, SIGTERM);
-                dev_priv->active_master = NULL;
-        }
-
-        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
-        if (!from_open) {
-                ttm_vt_unlock(&vmaster->lock);
-                BUG_ON(vmw_fp->locked_master != file_priv->master);
-                drm_master_put(&vmw_fp->locked_master);
-        }
-
-        dev_priv->active_master = vmaster;
         /*
          * Inform a new master that the layout may have changed while
          * it was gone.
@@ -1259,31 +1124,10 @@ static void vmw_master_drop(struct drm_device *dev,
                             struct drm_file *file_priv)
 {
         struct vmw_private *dev_priv = vmw_priv(dev);
-        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
-        struct vmw_master *vmaster = vmw_master(file_priv->master);
-        int ret;
-
-        /**
-         * Make sure the master doesn't disappear while we have
-         * it locked.
-         */
-
-        vmw_fp->locked_master = drm_master_get(file_priv->master);
-        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
         vmw_kms_legacy_hotspot_clear(dev_priv);
-        if (unlikely((ret != 0))) {
-                DRM_ERROR("Unable to lock TTM at VT switch.\n");
-                drm_master_put(&vmw_fp->locked_master);
-        }
-
-        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

         if (!dev_priv->enable_fb)
                 vmw_svga_disable(dev_priv);
-
-        dev_priv->active_master = &dev_priv->fbdev_master;
-        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
-        ttm_vt_unlock(&dev_priv->fbdev_master.lock);
 }

 /**
@@ -1562,8 +1406,6 @@ static struct drm_driver driver = {
         .disable_vblank = vmw_disable_vblank,
         .ioctls = vmw_ioctls,
         .num_ioctls = ARRAY_SIZE(vmw_ioctls),
-        .master_create = vmw_master_create,
-        .master_destroy = vmw_master_destroy,
         .master_set = vmw_master_set,
         .master_drop = vmw_master_drop,
         .open = vmw_driver_open,
...
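With vmw_master_check() gone, the ioctl dispatch above loses its only point that could sleep on the TTM lock or fail with -EACCES; what survives is flag validation plus a direct call. A condensed sketch of the new shape (the demo_ name and the elided error paths are ours, not the driver's):

static long demo_generic_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg,
                               long (*ioctl_func)(struct file *, unsigned int,
                                                  unsigned long))
{
        unsigned int nr = DRM_IOCTL_NR(cmd);
        unsigned int flags;

        if (!drm_ioctl_flags(nr, &flags))
                return -EINVAL;

        /* Previously: vmw_master_check() could block here waiting for a
         * master switch, or reject a dropped master with -EACCES. */
        return ioctl_func(filp, cmd, arg);
}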
@@ -81,7 +81,6 @@
 #define VMW_RES_SHADER ttm_driver_type4

 struct vmw_fpriv {
-        struct drm_master *locked_master;
         struct ttm_object_file *tfile;
         bool gb_aware; /* user-space is guest-backed aware */
 };
@@ -393,10 +392,6 @@ struct vmw_sw_context{
 struct vmw_legacy_display;
 struct vmw_overlay;

-struct vmw_master {
-        struct ttm_lock lock;
-};
-
 struct vmw_vga_topology_state {
         uint32_t width;
         uint32_t height;
@@ -559,11 +554,8 @@ struct vmw_private {
         spinlock_t svga_lock;

         /**
-         * Master management.
+         * PM management.
          */
-        struct vmw_master *active_master;
-        struct vmw_master fbdev_master;
-
         struct notifier_block pm_nb;
         bool refuse_hibernation;
         bool suspend_locked;
@@ -632,11 +624,6 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
         return (struct vmw_fpriv *)file_priv->driver_priv;
 }

-static inline struct vmw_master *vmw_master(struct drm_master *master)
-{
-        return (struct vmw_master *) master->driver_priv;
-}
-
 /*
  * The locking here is fine-grained, so that it is performed once
  * for every read- and write operation. This is of course costly, but we
@@ -1102,7 +1089,6 @@ void vmw_kms_cursor_snoop(struct vmw_surface *srf,
 int vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned width, unsigned height, unsigned pitch,
                        unsigned bpp, unsigned depth);
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
 bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
                                 uint32_t pitch,
                                 uint32_t height);
...
@@ -946,12 +946,6 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv,
         if (unlikely(drm_is_render_client(file_priv)))
                 require_exist = true;

-        if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) {
-                DRM_ERROR("Locked master refused legacy "
-                          "surface reference.\n");
-                return -EACCES;
-        }
-
         handle = u_handle;
 }
...
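What makes these removals safe is what the commit message states: object handles are resolved against the caller's own ttm_object_file, so one client cannot name another realm's objects, and the command verifier checks every reference a submission makes. Conceptually, using the existing TTM lookup (error code illustrative):

struct ttm_base_object *base;

/* The handle is looked up only in this client's namespace; a handle
 * minted for another tfile is simply not found. */
base = ttm_base_object_lookup(vmw_fpriv(file_priv)->tfile, handle);
if (unlikely(!base))
        return -EINVAL; /* not visible to this client */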