Commit 0a3579e3 authored by Dave Airlie

Merge tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux into drm-fixes

Pull request of 2015-09-24

Vmwgfx fixes for 4.3:
 - A couple of uninitialized variable fixes by Christian Engelmayer
 - A TTM fix for a bug that causes problems with the new vmwgfx device init
 - A vmwgfx refcounting fix
 - A vmwgfx iomem caching fix
 - A DRM change that allows control clients, too, to read the drm driver version.

* tag 'vmwgfx-fixes-4.3-150924' of git://people.freedesktop.org/~thomash/linux:
  drm: Allow also control clients to check the drm version
  drm/vmwgfx: Fix uninitialized return in vmw_kms_helper_dirty()
  drm/vmwgfx: Fix uninitialized return in vmw_cotable_unbind()
  drm/vmwgfx: Only build on X86
  drm/ttm: Fix memory space allocation v2
  drm/vmwgfx: Map the fifo as cached
  drm/vmwgfx: Fix up user_dmabuf refcounting
parents e4b35f95 30c64664
@@ -520,7 +520,8 @@ EXPORT_SYMBOL(drm_ioctl_permit);

 /** Ioctl table */
 static const struct drm_ioctl_desc drm_ioctls[] = {
-	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_VERSION, drm_version,
+		      DRM_UNLOCKED|DRM_RENDER_ALLOW|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_UNIQUE, drm_getunique, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_GET_MAGIC, drm_getmagic, 0),
 	DRM_IOCTL_DEF(DRM_IOCTL_IRQ_BUSID, drm_irq_by_busid, DRM_MASTER|DRM_ROOT_ONLY),
......
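The hunk above adds DRM_CONTROL_ALLOW to the version ioctl, so a client that has opened a DRM control node can identify the driver like any other client. A minimal userspace sketch, assuming libdrm is installed; the controlD64 path is illustrative (control minors traditionally start at 64):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <xf86drm.h>

int main(void)
{
	/* Control minors start at 64; the exact path is illustrative. */
	int fd = open("/dev/dri/controlD64", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* DRM_IOCTL_VERSION is now permitted on control nodes, too. */
	drmVersionPtr v = drmGetVersion(fd);

	if (v) {
		printf("%s %d.%d.%d\n", v->name, v->version_major,
		       v->version_minor, v->version_patchlevel);
		drmFreeVersion(v);
	}
	close(fd);
	return 0;
}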
@@ -882,6 +882,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
+		if (!man->has_type || !man->use_type)
+			continue;

 		type_ok = ttm_bo_mt_compatible(man, mem_type, place,
 					       &cur_flags);
@@ -889,6 +891,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (!type_ok)
 			continue;

+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -901,12 +904,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (mem_type == TTM_PL_SYSTEM)
 			break;

-		if (man->has_type && man->use_type) {
-			type_found = true;
-			ret = (*man->func->get_node)(man, bo, place, mem);
-			if (unlikely(ret))
-				return ret;
-		}
+		ret = (*man->func->get_node)(man, bo, place, mem);
+		if (unlikely(ret))
+			return ret;
+
 		if (mem->mm_node)
 			break;
 	}
@@ -917,9 +918,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		return 0;
 	}

-	if (!type_found)
-		return -EINVAL;
-
 	for (i = 0; i < placement->num_busy_placement; ++i) {
 		const struct ttm_place *place = &placement->busy_placement[i];
@@ -927,11 +925,12 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret)
 			return ret;
 		man = &bdev->man[mem_type];
-		if (!man->has_type)
+		if (!man->has_type || !man->use_type)
 			continue;
 		if (!ttm_bo_mt_compatible(man, mem_type, place, &cur_flags))
 			continue;

+		type_found = true;
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
 		/*
@@ -957,8 +956,13 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		if (ret == -ERESTARTSYS)
 			has_erestartsys = true;
 	}

-	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
-	return ret;
+	if (!type_found) {
+		printk(KERN_ERR TTM_PFX "No compatible memory type found.\n");
+		return -EINVAL;
+	}
+
+	return (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
......
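The TTM change reworks how ttm_bo_mem_space() reports failure: a disabled memory-type manager (!has_type || !use_type) is simply skipped in both the preferred and busy passes, type_found is set as soon as any compatible enabled type is seen, and only when no such type exists at all does the function return -EINVAL with a log message; otherwise running out of space yields -ENOMEM (or -ERESTARTSYS if eviction was interrupted). A simplified, self-contained sketch of that control flow; the types and helpers below are invented stand-ins, not the real TTM API:

#include <errno.h>
#include <stdbool.h>

/* Invented stand-in: one "manager" per memory type. */
struct mem_type_man {
	bool has_type;		/* initialized at all */
	bool use_type;		/* currently enabled */
	bool has_space;		/* room left for this request */
};

static int alloc_space(struct mem_type_man *man)
{
	return man->has_space ? 0 : -1;	/* -1: compatible but full */
}

int mem_space(struct mem_type_man *man, const int *pref, int npref,
	      const int *busy, int nbusy)
{
	bool type_found = false;
	int i;

	for (i = 0; i < npref; ++i) {		/* preferred placements */
		struct mem_type_man *m = &man[pref[i]];

		if (!m->has_type || !m->use_type)
			continue;		/* disabled: skip, don't fail */
		type_found = true;
		if (alloc_space(m) == 0)
			return 0;
	}
	for (i = 0; i < nbusy; ++i) {		/* busy (eviction) placements */
		struct mem_type_man *m = &man[busy[i]];

		if (!m->has_type || !m->use_type)
			continue;
		type_found = true;
		if (alloc_space(m) == 0)
			return 0;
	}
	if (!type_found)	/* nothing compatible exists: caller error */
		return -EINVAL;
	return -ENOMEM;		/* compatible types exist but are full */
}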
 config DRM_VMWGFX
 	tristate "DRM driver for VMware Virtual GPU"
-	depends on DRM && PCI
+	depends on DRM && PCI && X86
 	select FB_DEFERRED_IO
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
......
@@ -311,7 +311,6 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct ttm_buffer_object *bo = val_buf->bo;
 	struct vmw_fence_obj *fence;
-	int ret;

 	if (list_empty(&res->mob_head))
 		return 0;
@@ -328,7 +327,7 @@ static int vmw_cotable_unbind(struct vmw_resource *res,
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);

-	return ret;
+	return 0;
 }
/** /**
......
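Both uninitialized-return fixes in this pull are instances of the same bug class: a local int ret is only assigned on paths that also return early, so the final return ret leaks whatever happened to be on the stack. vmw_cotable_unbind() always succeeds once it reaches its tail, hence the fix returns 0; vmw_kms_helper_dirty() (further down) returns -ENOMEM on the one path that used ret. A minimal illustration of the pattern, using a hypothetical function rather than the driver code:

#include <errno.h>

static int do_work(int arg)
{
	return arg > 1 ? -EINVAL : 0;
}

static int broken(int fail)
{
	int ret;			/* never assigned when fail == 0 */

	if (fail) {
		ret = do_work(fail);
		if (ret)
			return ret;
	}
	return ret;			/* uninitialized on the fail == 0 path */
}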
@@ -752,12 +752,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	dev_priv->active_master = &dev_priv->fbdev_master;

-	dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start,
-					       dev_priv->mmio_size);
-
-	dev_priv->mmio_virt = ioremap_wc(dev_priv->mmio_start,
-					 dev_priv->mmio_size);
+	dev_priv->mmio_virt = ioremap_cache(dev_priv->mmio_start,
+					    dev_priv->mmio_size);

 	if (unlikely(dev_priv->mmio_virt == NULL)) {
 		ret = -ENOMEM;
@@ -913,7 +909,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 out_err4:
 	iounmap(dev_priv->mmio_virt);
 out_err3:
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	vmw_ttm_global_release(dev_priv);
 out_err0:
 	for (i = vmw_res_context; i < vmw_res_max; ++i)
@@ -964,7 +959,6 @@ static int vmw_driver_unload(struct drm_device *dev)
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
-	arch_phys_wc_del(dev_priv->mmio_mtrr);
 	if (dev_priv->ctx.staged_bindings)
 		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
 	vmw_ttm_global_release(dev_priv);
......
@@ -376,7 +376,6 @@ struct vmw_private {
 	uint32_t initial_width;
 	uint32_t initial_height;
 	u32 __iomem *mmio_virt;
-	int mmio_mtrr;
 	uint32_t capabilities;
 	uint32_t max_gmr_ids;
 	uint32_t max_gmr_pages;
@@ -631,7 +630,8 @@ extern int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 				 uint32_t size,
 				 bool shareable,
 				 uint32_t *handle,
-				 struct vmw_dma_buffer **p_dma_buf);
+				 struct vmw_dma_buffer **p_dma_buf,
+				 struct ttm_base_object **p_base);
 extern int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 				     struct vmw_dma_buffer *dma_buf,
 				     uint32_t *handle);
@@ -645,7 +645,8 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 					 uint32_t cur_validate_node);
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-				  uint32_t id, struct vmw_dma_buffer **out);
+				  uint32_t id, struct vmw_dma_buffer **out,
+				  struct ttm_base_object **base);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
......
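The new struct ttm_base_object ** out-parameter changes the lookup/alloc contract: when the caller passes a non-NULL pointer it receives a counted reference to the underlying TTM base object, which pins the user-space handle and must later be dropped with ttm_base_object_unref(). A sketch of the intended caller pattern, mirroring the synccpu ioctl hunk further down (a fragment only, not compilable outside the driver):

	struct vmw_dma_buffer *dma_buf;
	struct ttm_base_object *buffer_base;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf, &buffer_base);
	if (unlikely(ret != 0))
		return ret;

	/* ... use dma_buf; buffer_base keeps the backing handle alive ... */

	vmw_dmabuf_unreference(&dma_buf);
	ttm_base_object_unref(&buffer_base);	/* drop the extra reference */

Callers that do not care about the base object pass NULL and the old behavior (reference dropped inside the lookup) is preserved.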
@@ -1236,7 +1236,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use MOB buffer.\n");
 		ret = -EINVAL;
@@ -1296,7 +1297,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct vmw_relocation *reloc;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
+	ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo,
+				     NULL);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use GMR region.\n");
 		ret = -EINVAL;
......
@@ -1685,7 +1685,6 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 	struct drm_crtc *crtc;
 	u32 num_units = 0;
 	u32 i, k;
-	int ret;

 	dirty->dev_priv = dev_priv;
@@ -1711,7 +1710,7 @@ int vmw_kms_helper_dirty(struct vmw_private *dev_priv,
 		if (!dirty->cmd) {
 			DRM_ERROR("Couldn't reserve fifo space "
 				  "for dirty blits.\n");
-			return ret;
+			return -ENOMEM;
 		}
 		memset(dirty->cmd, 0, dirty->fifo_reserve_size);
 	}
......
@@ -484,7 +484,7 @@ int vmw_overlay_ioctl(struct drm_device *dev, void *data,
 		goto out_unlock;
 	}

-	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
+	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
 	if (ret)
 		goto out_unlock;
......
@@ -354,7 +354,7 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 	}

 	*out_surf = NULL;
-	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf, NULL);
 	return ret;
 }
@@ -481,7 +481,8 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 			  uint32_t size,
 			  bool shareable,
 			  uint32_t *handle,
-			  struct vmw_dma_buffer **p_dma_buf)
+			  struct vmw_dma_buffer **p_dma_buf,
+			  struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_buffer_object *tmp;
@@ -515,6 +516,10 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
 	}

 	*p_dma_buf = &user_bo->dma;
+	if (p_base) {
+		*p_base = &user_bo->prime.base;
+		kref_get(&(*p_base)->refcount);
+	}
 	*handle = user_bo->prime.base.hash.key;

 out_no_base_object:
@@ -631,6 +636,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	struct vmw_dma_buffer *dma_buf;
 	struct vmw_user_dma_buffer *user_bo;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct ttm_base_object *buffer_base;
 	int ret;

 	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
@@ -643,7 +649,8 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 	switch (arg->op) {
 	case drm_vmw_synccpu_grab:
-		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf,
+					     &buffer_base);
 		if (unlikely(ret != 0))
 			return ret;
@@ -651,6 +658,7 @@ int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
 				       dma);
 		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
 		vmw_dmabuf_unreference(&dma_buf);
+		ttm_base_object_unref(&buffer_base);
 		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
 			     ret != -EBUSY)) {
 			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
@@ -692,7 +700,8 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 		return ret;

 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
-				    req->size, false, &handle, &dma_buf);
+				    req->size, false, &handle, &dma_buf,
+				    NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
@@ -721,7 +730,8 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
 }

 int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
-			   uint32_t handle, struct vmw_dma_buffer **out)
+			   uint32_t handle, struct vmw_dma_buffer **out,
+			   struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
 	struct ttm_base_object *base;
@@ -743,7 +753,10 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
 				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
-	ttm_base_object_unref(&base);
+	if (p_base)
+		*p_base = base;
+	else
+		ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;

 	return 0;
@@ -1004,7 +1017,7 @@ int vmw_dumb_create(struct drm_file *file_priv,
 	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
 				    args->size, false, &args->handle,
-				    &dma_buf);
+				    &dma_buf, NULL);
 	if (unlikely(ret != 0))
 		goto out_no_dmabuf;
@@ -1032,7 +1045,7 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	struct vmw_dma_buffer *out_buf;
 	int ret;

-	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+	ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf, NULL);
 	if (ret != 0)
 		return -EINVAL;
......
@@ -855,7 +855,7 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
 	if (buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, buffer_handle,
-					     &buffer);
+					     &buffer, NULL);
 		if (unlikely(ret != 0)) {
 			DRM_ERROR("Could not find buffer for shader "
 				  "creation.\n");
......
@@ -46,6 +46,7 @@ struct vmw_user_surface {
 	struct vmw_surface srf;
 	uint32_t size;
 	struct drm_master *master;
+	struct ttm_base_object *backup_base;
 };

 /**
@@ -656,6 +657,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 	struct vmw_resource *res = &user_srf->srf.res;

 	*p_base = NULL;
+	ttm_base_object_unref(&user_srf->backup_base);
 	vmw_resource_unreference(&res);
 }
@@ -851,7 +853,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
 					    res->backup_size,
 					    true,
 					    &backup_handle,
-					    &res->backup);
+					    &res->backup,
+					    &user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
 			goto out_unlock;
@@ -1321,7 +1324,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 	if (req->buffer_handle != SVGA3D_INVALID_ID) {
 		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle,
-					     &res->backup);
+					     &res->backup,
+					     &user_srf->backup_base);
 		if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE <
 		    res->backup_size) {
 			DRM_ERROR("Surface backup buffer is too small.\n");
@@ -1335,7 +1339,8 @@ int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
 					req->drm_surface_flags &
 					drm_vmw_surface_flag_shareable,
 					&backup_handle,
-					&res->backup);
+					&res->backup,
+					&user_srf->backup_base);
 		if (unlikely(ret != 0)) {
 			vmw_resource_unreference(&res);
......
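The surface hunks make the ownership pairing explicit: the surface takes a base-object reference to its backup buffer at create/lookup time (backup_base) and drops it exactly once in vmw_user_surface_base_release(), so the backup can no longer go away while the surface still points at it. The general acquire-in-create, release-in-destroy shape, sketched with a plain C11 refcount; this is an illustration of the pattern, not the TTM implementation:

#include <stdatomic.h>
#include <stdlib.h>

struct base {
	atomic_int refcount;
};

static void base_get(struct base *b)
{
	atomic_fetch_add(&b->refcount, 1);
}

static void base_put(struct base *b)
{
	if (atomic_fetch_sub(&b->refcount, 1) == 1)
		free(b);			/* last reference gone */
}

struct surface {
	struct base *backup_base;
};

/* Create/lookup: take the reference the surface will own. */
static void surface_attach_backup(struct surface *s, struct base *b)
{
	base_get(b);
	s->backup_base = b;
}

/* Destroy: the single place the owned reference is dropped. */
static void surface_release(struct surface *s)
{
	base_put(s->backup_base);
	s->backup_base = NULL;
}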