Commit 18ffe4b1 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  vmwgfx: Fix fb VRAM pinning failure due to fragmentation
  vmwgfx: Remove initialisation of dev::devname
  vmwgfx: Enable use of the vblank system
  vmwgfx: vt-switch (master drop) fixes
  drm/vmwgfx: Fix breakage introduced by commit "drm: block userspace under allocating buffer and having drivers overwrite it (v2)"
  drm: Hold the mutex when dropping the last GEM reference (v2)
  drm/gem: handlecount isn't really a kref so don't make it one.
  drm: i810/i830: fix locked ioctl variant
  drm/radeon/kms: add quirk for MSI K9A2GM motherboard
  drm/radeon/kms: fix potential segfault in r600_ioctl_wait_idle
  drm: Prune GEM vma entries
  drm/radeon/kms: fix up encoder info messages for DFP6
  drm/radeon: fix PCI ID 5657 to be an RV410
parents b10c4d40 abb295f3
@@ -148,7 +148,7 @@ int drm_gem_object_init(struct drm_device *dev,
 		return -ENOMEM;
 	kref_init(&obj->refcount);
-	kref_init(&obj->handlecount);
+	atomic_set(&obj->handle_count, 0);
 	obj->size = size;
 	atomic_inc(&dev->object_count);
@@ -462,28 +462,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
-/**
- * Called after the last reference to the object has been lost.
- * Must be called without holding struct_mutex
- *
- * Frees the object
- */
-void
-drm_gem_object_free_unlocked(struct kref *kref)
-{
-	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
-	struct drm_device *dev = obj->dev;
-	if (dev->driver->gem_free_object_unlocked != NULL)
-		dev->driver->gem_free_object_unlocked(obj);
-	else if (dev->driver->gem_free_object != NULL) {
-		mutex_lock(&dev->struct_mutex);
-		dev->driver->gem_free_object(obj);
-		mutex_unlock(&dev->struct_mutex);
-	}
-}
-EXPORT_SYMBOL(drm_gem_object_free_unlocked);
 static void drm_gem_object_ref_bug(struct kref *list_kref)
 {
 	BUG();
@@ -496,12 +474,8 @@ static void drm_gem_object_ref_bug(struct kref *list_kref)
  * called before drm_gem_object_free or we'll be touching
  * freed memory
  */
-void
-drm_gem_object_handle_free(struct kref *kref)
+void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj = container_of(kref,
-						  struct drm_gem_object,
-						  handlecount);
 	struct drm_device *dev = obj->dev;
 	/* Remove any name for this object */
@@ -528,6 +502,10 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	struct drm_gem_object *obj = vma->vm_private_data;
 	drm_gem_object_reference(obj);
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_open_locked(vma);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
@@ -535,7 +513,10 @@ void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
-	drm_gem_object_unreference_unlocked(obj);
+	mutex_lock(&obj->dev->struct_mutex);
+	drm_vm_close_locked(vma);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
...
@@ -255,7 +255,7 @@ int drm_gem_one_name_info(int id, void *ptr, void *data)
 	seq_printf(m, "%6d %8zd %7d %8d\n",
 		   obj->name, obj->size,
-		   atomic_read(&obj->handlecount.refcount),
+		   atomic_read(&obj->handle_count),
 		   atomic_read(&obj->refcount.refcount));
 	return 0;
 }
...
@@ -433,15 +433,7 @@ static void drm_vm_open(struct vm_area_struct *vma)
 	mutex_unlock(&dev->struct_mutex);
 }
-/**
- * \c close method for all virtual memory types.
- *
- * \param vma virtual memory area.
- *
- * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
- * free it.
- */
-static void drm_vm_close(struct vm_area_struct *vma)
+void drm_vm_close_locked(struct vm_area_struct *vma)
 {
 	struct drm_file *priv = vma->vm_file->private_data;
 	struct drm_device *dev = priv->minor->dev;
@@ -451,7 +443,6 @@ static void drm_vm_close(struct vm_area_struct *vma)
 		  vma->vm_start, vma->vm_end - vma->vm_start);
 	atomic_dec(&dev->vma_count);
-	mutex_lock(&dev->struct_mutex);
 	list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
 		if (pt->vma == vma) {
 			list_del(&pt->head);
@@ -459,6 +450,23 @@ static void drm_vm_close(struct vm_area_struct *vma)
 			break;
 		}
 	}
+}
+/**
+ * \c close method for all virtual memory types.
+ *
+ * \param vma virtual memory area.
+ *
+ * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
+ * free it.
+ */
+static void drm_vm_close(struct vm_area_struct *vma)
+{
+	struct drm_file *priv = vma->vm_file->private_data;
+	struct drm_device *dev = priv->minor->dev;
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_close_locked(vma);
 	mutex_unlock(&dev->struct_mutex);
 }
...
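Note: the drm_vm.c hunk above splits the vma teardown into drm_vm_close_locked(), which expects the caller to already hold struct_mutex, plus a thin drm_vm_close() wrapper that takes the lock itself; drm_gem_vm_close() in the first file can then reuse the teardown while holding the mutex around both the vma bookkeeping and the final GEM unreference. A generic, hedged sketch of that locked-helper shape (hypothetical names, userspace pthreads standing in for kernel mutexes):

#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int vma_count;

/* Caller must hold table_lock; does the actual bookkeeping. */
static void example_close_locked(void)
{
	vma_count--;
}

/* Convenience wrapper for callers that do not hold the lock. */
static void example_close(void)
{
	pthread_mutex_lock(&table_lock);
	example_close_locked();
	pthread_mutex_unlock(&table_lock);
}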
@@ -116,7 +116,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i810_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i810_ioctl,
 	.mmap = i810_mmap_buffers,
 	.fasync = drm_fasync,
 };
...
@@ -118,7 +118,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
 static const struct file_operations i830_buffer_fops = {
 	.open = drm_open,
 	.release = drm_release,
-	.unlocked_ioctl = drm_ioctl,
+	.unlocked_ioctl = i830_ioctl,
 	.mmap = i830_mmap_buffers,
 	.fasync = drm_fasync,
 };
...
@@ -136,14 +136,12 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
 		return -ENOMEM;
 	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(obj);
 	if (ret) {
-		drm_gem_object_unreference_unlocked(obj);
 		return ret;
 	}
-	/* Sink the floating reference from kref_init(handlecount) */
-	drm_gem_object_handle_unreference_unlocked(obj);
 	args->handle = handle;
 	return 0;
 }
...
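Note: this i915 hunk, and the matching nouveau and radeon hunks further down, all switch to the same idiom: the reference taken at allocation is handed over to the handle, so it is dropped unconditionally right after drm_gem_handle_create() rather than only on the error path. A simplified sketch of the resulting create-ioctl shape (hypothetical driver function, error handling trimmed):

int example_gem_create(struct drm_device *dev, struct drm_file *file_priv,
		       size_t size, u32 *handle_out)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_alloc(dev, size);	/* refcount == 1 */
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handle_out);
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}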
@@ -237,8 +237,10 @@ int intel_fbdev_destroy(struct drm_device *dev,
 	drm_fb_helper_fini(&ifbdev->helper);
 	drm_framebuffer_cleanup(&ifb->base);
-	if (ifb->obj)
+	if (ifb->obj) {
+		drm_gem_object_handle_unreference(ifb->obj);
 		drm_gem_object_unreference(ifb->obj);
+	}
 	return 0;
 }
...
@@ -352,6 +352,7 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
 	if (nouveau_fb->nvbo) {
 		nouveau_bo_unmap(nouveau_fb->nvbo);
+		drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
 		drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
 		nouveau_fb->nvbo = NULL;
 	}
...
@@ -167,11 +167,9 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		goto out;
 	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(nvbo->gem);
 out:
-	drm_gem_object_handle_unreference_unlocked(nvbo->gem);
-	if (ret)
-		drm_gem_object_unreference_unlocked(nvbo->gem);
 	return ret;
 }
...
@@ -79,6 +79,7 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
 	mutex_lock(&dev->struct_mutex);
 	nouveau_bo_unpin(chan->notifier_bo);
 	mutex_unlock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
 	drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
 	drm_mm_takedown(&chan->notifier_heap);
 }
...
@@ -3528,7 +3528,8 @@ void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo)
 	/* r7xx hw bug. write to HDP_DEBUG1 followed by fb read
	 * rather than write to HDP_REG_COHERENCY_FLUSH_CNTL
	 */
-	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740)) {
+	if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_RV740) &&
+	    rdev->vram_scratch.ptr) {
 		void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
 		u32 tmp;
...
@@ -317,6 +317,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 			*connector_type = DRM_MODE_CONNECTOR_DVID;
 	}
+	/* MSI K9A2GM V2/V3 board has no HDMI or DVI */
+	if ((dev->pdev->device == 0x796e) &&
+	    (dev->pdev->subsystem_vendor == 0x1462) &&
+	    (dev->pdev->subsystem_device == 0x7302)) {
+		if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) ||
+		    (supported_device == ATOM_DEVICE_DFP3_SUPPORT))
+			return false;
+	}
 	/* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */
 	if ((dev->pdev->device == 0x7941) &&
 	    (dev->pdev->subsystem_vendor == 0x147b) &&
...
@@ -349,6 +349,8 @@ static void radeon_print_display_setup(struct drm_device *dev)
 			DRM_INFO(" DFP4: %s\n", encoder_names[radeon_encoder->encoder_id]);
 		if (devices & ATOM_DEVICE_DFP5_SUPPORT)
 			DRM_INFO(" DFP5: %s\n", encoder_names[radeon_encoder->encoder_id]);
+		if (devices & ATOM_DEVICE_DFP6_SUPPORT)
+			DRM_INFO(" DFP6: %s\n", encoder_names[radeon_encoder->encoder_id]);
 		if (devices & ATOM_DEVICE_TV1_SUPPORT)
 			DRM_INFO(" TV1: %s\n", encoder_names[radeon_encoder->encoder_id]);
 		if (devices & ATOM_DEVICE_CV_SUPPORT)
@@ -841,8 +843,9 @@ static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
-	if (radeon_fb->obj)
+	if (radeon_fb->obj) {
 		drm_gem_object_unreference_unlocked(radeon_fb->obj);
+	}
 	drm_framebuffer_cleanup(fb);
 	kfree(radeon_fb);
 }
...
@@ -94,8 +94,10 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
 	ret = radeon_bo_reserve(rbo, false);
 	if (likely(ret == 0)) {
 		radeon_bo_kunmap(rbo);
+		radeon_bo_unpin(rbo);
 		radeon_bo_unreserve(rbo);
 	}
+	drm_gem_object_handle_unreference(gobj);
 	drm_gem_object_unreference_unlocked(gobj);
 }
@@ -325,8 +327,6 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 {
 	struct fb_info *info;
 	struct radeon_framebuffer *rfb = &rfbdev->rfb;
-	struct radeon_bo *rbo;
-	int r;
 	if (rfbdev->helper.fbdev) {
 		info = rfbdev->helper.fbdev;
@@ -338,14 +338,8 @@ static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfb
 	}
 	if (rfb->obj) {
-		rbo = rfb->obj->driver_private;
-		r = radeon_bo_reserve(rbo, false);
-		if (likely(r == 0)) {
-			radeon_bo_kunmap(rbo);
-			radeon_bo_unpin(rbo);
-			radeon_bo_unreserve(rbo);
-		}
-		drm_gem_object_unreference_unlocked(rfb->obj);
+		radeonfb_destroy_pinned_object(rfb->obj);
+		rfb->obj = NULL;
 	}
 	drm_fb_helper_fini(&rfbdev->helper);
 	drm_framebuffer_cleanup(&rfb->base);
...
@@ -201,11 +201,11 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 		return r;
 	}
 	r = drm_gem_handle_create(filp, gobj, &handle);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(gobj);
 	if (r) {
-		drm_gem_object_unreference_unlocked(gobj);
 		return r;
 	}
-	drm_gem_object_handle_unreference_unlocked(gobj);
 	args->handle = handle;
 	return 0;
 }
...
@@ -148,13 +148,16 @@ static struct pci_device_id vmw_pci_id_list[] = {
 	{0, 0, 0}
 };
-static char *vmw_devname = "vmwgfx";
+static int enable_fbdev;
 static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
 static void vmw_master_init(struct vmw_master *);
 static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 			      void *ptr);
+MODULE_PARM_DESC(enable_fbdev, "Enable vmwgfx fbdev");
+module_param_named(enable_fbdev, enable_fbdev, int, 0600);
 static void vmw_print_capabilities(uint32_t capabilities)
 {
 	DRM_INFO("Capabilities:\n");
@@ -192,8 +195,6 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 {
 	int ret;
-	vmw_kms_save_vga(dev_priv);
 	ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Unable to initialize FIFO.\n");
@@ -206,9 +207,35 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
-	vmw_kms_restore_vga(dev_priv);
 }
+int vmw_3d_resource_inc(struct vmw_private *dev_priv)
+{
+	int ret = 0;
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(dev_priv->num_3d_resources++ == 0)) {
+		ret = vmw_request_device(dev_priv);
+		if (unlikely(ret != 0))
+			--dev_priv->num_3d_resources;
+	}
+	mutex_unlock(&dev_priv->release_mutex);
+	return ret;
+}
+void vmw_3d_resource_dec(struct vmw_private *dev_priv)
+{
+	int32_t n3d;
+	mutex_lock(&dev_priv->release_mutex);
+	if (unlikely(--dev_priv->num_3d_resources == 0))
+		vmw_release_device(dev_priv);
+	n3d = (int32_t) dev_priv->num_3d_resources;
+	mutex_unlock(&dev_priv->release_mutex);
+	BUG_ON(n3d < 0);
+}
 static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 {
@@ -228,6 +255,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->last_read_sequence = (uint32_t) -100;
 	mutex_init(&dev_priv->hw_mutex);
 	mutex_init(&dev_priv->cmdbuf_mutex);
+	mutex_init(&dev_priv->release_mutex);
 	rwlock_init(&dev_priv->resource_lock);
 	idr_init(&dev_priv->context_idr);
 	idr_init(&dev_priv->surface_idr);
@@ -244,6 +272,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
 	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);
+	dev_priv->enable_fb = enable_fbdev;
 	mutex_lock(&dev_priv->hw_mutex);
 	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
@@ -343,17 +373,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev->dev_private = dev_priv;
-	if (!dev->devname)
-		dev->devname = vmw_devname;
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
-		ret = drm_irq_install(dev);
-		if (unlikely(ret != 0)) {
-			DRM_ERROR("Failed installing irq: %d\n", ret);
-			goto out_no_irq;
-		}
-	}
 	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
 	dev_priv->stealth = (ret != 0);
 	if (dev_priv->stealth) {
@@ -369,26 +388,52 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 			goto out_no_device;
 		}
 	}
-	ret = vmw_request_device(dev_priv);
+	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
-		goto out_no_device;
-	vmw_kms_init(dev_priv);
+		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
-	vmw_fb_init(dev_priv);
+	if (dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv);
+		if (unlikely(ret != 0))
+			goto out_no_fifo;
+		vmw_kms_save_vga(dev_priv);
+		vmw_fb_init(dev_priv);
+		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
			 "Detected device 3D availability.\n" :
			 "Detected no device 3D availability.\n");
+	} else {
+		DRM_INFO("Delayed 3D detection since we're not "
			 "running the device in SVGA mode yet.\n");
+	}
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
+		ret = drm_irq_install(dev);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed installing irq: %d\n", ret);
+			goto out_no_irq;
+		}
+	}
 	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
 	register_pm_notifier(&dev_priv->pm_nb);
-	DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ? "Have 3D\n" : "No 3D\n");
 	return 0;
-out_no_device:
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
-	if (dev->devname == vmw_devname)
-		dev->devname = NULL;
 out_no_irq:
+	if (dev_priv->enable_fb) {
+		vmw_fb_close(dev_priv);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
+out_no_fifo:
+	vmw_overlay_close(dev_priv);
+	vmw_kms_close(dev_priv);
+out_no_kms:
+	if (dev_priv->stealth)
+		pci_release_region(dev->pdev, 2);
+	else
+		pci_release_regions(dev->pdev);
+out_no_device:
 	ttm_object_device_release(&dev_priv->tdev);
 out_err4:
 	iounmap(dev_priv->mmio_virt);
@@ -415,19 +460,20 @@ static int vmw_driver_unload(struct drm_device *dev)
 	unregister_pm_notifier(&dev_priv->pm_nb);
-	vmw_fb_close(dev_priv);
+	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
+		drm_irq_uninstall(dev_priv->dev);
+	if (dev_priv->enable_fb) {
+		vmw_fb_close(dev_priv);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
 	vmw_kms_close(dev_priv);
 	vmw_overlay_close(dev_priv);
-	vmw_release_device(dev_priv);
 	if (dev_priv->stealth)
 		pci_release_region(dev->pdev, 2);
 	else
 		pci_release_regions(dev->pdev);
-	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
-		drm_irq_uninstall(dev_priv->dev);
-	if (dev->devname == vmw_devname)
-		dev->devname = NULL;
 	ttm_object_device_release(&dev_priv->tdev);
 	iounmap(dev_priv->mmio_virt);
 	drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
@@ -500,7 +546,7 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
 		struct drm_ioctl_desc *ioctl =
 			&vmw_ioctls[nr - DRM_COMMAND_BASE];
-		if (unlikely(ioctl->cmd != cmd)) {
+		if (unlikely(ioctl->cmd_drv != cmd)) {
 			DRM_ERROR("Invalid command format, ioctl %d\n",
 				  nr - DRM_COMMAND_BASE);
 			return -EINVAL;
@@ -589,6 +635,16 @@ static int vmw_master_set(struct drm_device *dev,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret = 0;
+	if (!dev_priv->enable_fb) {
+		ret = vmw_3d_resource_inc(dev_priv);
+		if (unlikely(ret != 0))
+			return ret;
+		vmw_kms_save_vga(dev_priv);
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 0);
+		mutex_unlock(&dev_priv->hw_mutex);
+	}
 	if (active) {
 		BUG_ON(active != &dev_priv->fbdev_master);
 		ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
@@ -617,7 +673,13 @@ static int vmw_master_set(struct drm_device *dev,
 	return 0;
 out_no_active_lock:
-	vmw_release_device(dev_priv);
+	if (!dev_priv->enable_fb) {
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
 	return ret;
 }
@@ -645,11 +707,23 @@ static void vmw_master_drop(struct drm_device *dev,
 	ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
+	if (!dev_priv->enable_fb) {
+		ret = ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM);
+		if (unlikely(ret != 0))
+			DRM_ERROR("Unable to clean VRAM on master drop.\n");
+		mutex_lock(&dev_priv->hw_mutex);
+		vmw_write(dev_priv, SVGA_REG_TRACES, 1);
+		mutex_unlock(&dev_priv->hw_mutex);
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv);
+	}
 	dev_priv->active_master = &dev_priv->fbdev_master;
 	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
 	ttm_vt_unlock(&dev_priv->fbdev_master.lock);
-	vmw_fb_on(dev_priv);
+	if (dev_priv->enable_fb)
+		vmw_fb_on(dev_priv);
 }
@@ -722,6 +796,7 @@ static struct drm_driver driver = {
 	.irq_postinstall = vmw_irq_postinstall,
 	.irq_uninstall = vmw_irq_uninstall,
 	.irq_handler = vmw_irq_handler,
+	.get_vblank_counter = vmw_get_vblank_counter,
 	.reclaim_buffers_locked = NULL,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
...
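Note: vmw_3d_resource_inc()/vmw_3d_resource_dec(), added above, turn SVGA FIFO bring-up into a reference-counted operation guarded by release_mutex: the 0 -> 1 transition calls vmw_request_device() and the 1 -> 0 transition calls vmw_release_device(). A hedged sketch of how a caller is expected to bracket work that needs the 3D device (hypothetical caller, details elided):

static int example_use_3d(struct vmw_private *dev_priv)
{
	int ret;

	ret = vmw_3d_resource_inc(dev_priv);	/* powers up the FIFO on first use */
	if (unlikely(ret != 0))
		return ret;

	/* ... create the surface/context and submit commands ... */

	vmw_3d_resource_dec(dev_priv);		/* releases the FIFO on last use */
	return 0;
}

The vmwgfx_resource.c hunks further down hold the reference for the whole lifetime of a context or surface instead: they take it when the resource is created on the host and drop it only when the resource is destroyed, so the device stays in SVGA mode while any 3D resource exists.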
@@ -277,6 +277,7 @@ struct vmw_private {
 	bool stealth;
 	bool is_opened;
+	bool enable_fb;
 	/**
 	 * Master management.
@@ -285,6 +286,9 @@ struct vmw_private {
 	struct vmw_master *active_master;
 	struct vmw_master fbdev_master;
 	struct notifier_block pm_nb;
+	struct mutex release_mutex;
+	uint32_t num_3d_resources;
 };
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -319,6 +323,9 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
 	return val;
 }
+int vmw_3d_resource_inc(struct vmw_private *dev_priv);
+void vmw_3d_resource_dec(struct vmw_private *dev_priv);
 /**
  * GMR utilities - vmwgfx_gmr.c
  */
@@ -511,6 +518,7 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
 			unsigned bbp, unsigned depth);
 int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
 /**
  * Overlay control - vmwgfx_overlay.c
...
@@ -615,6 +615,11 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
 	if (unlikely(ret != 0))
 		goto err_unlock;
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.mm_node->start < bo->num_pages)
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+				       false, false);
 	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
 	/* Could probably bug on */
...
@@ -106,6 +106,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 	mutex_lock(&dev_priv->hw_mutex);
 	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
 	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
 	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);
 	min = 4;
@@ -175,6 +176,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
 		  dev_priv->config_done_state);
 	vmw_write(dev_priv, SVGA_REG_ENABLE,
 		  dev_priv->enable_state);
+	vmw_write(dev_priv, SVGA_REG_TRACES,
+		  dev_priv->traces_state);
 	mutex_unlock(&dev_priv->hw_mutex);
 	vmw_fence_queue_takedown(&fifo->fence_queue);
...
@@ -898,7 +898,19 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
 		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
 		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
 		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
+		if (i == 0 && vmw_priv->num_displays == 1 &&
+		    save->width == 0 && save->height == 0) {
+			/*
+			 * It should be fairly safe to assume that these
+			 * values are uninitialized.
+			 */
+			save->width = vmw_priv->vga_width - save->pos_x;
+			save->height = vmw_priv->vga_height - save->pos_y;
+		}
 	}
 	return 0;
 }
@@ -984,3 +996,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
 }
+u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	return 0;
+}
@@ -27,6 +27,8 @@
 #include "vmwgfx_kms.h"
+#define VMWGFX_LDU_NUM_DU 8
 #define vmw_crtc_to_ldu(x) \
 	container_of(x, struct vmw_legacy_display_unit, base.crtc)
 #define vmw_encoder_to_ldu(x) \
@@ -536,6 +538,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+	int i;
+	int ret;
 	if (dev_priv->ldu_priv) {
 		DRM_INFO("ldu system already on\n");
 		return -EINVAL;
@@ -553,23 +559,24 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 	drm_mode_create_dirty_info_property(dev_priv->dev);
-	vmw_ldu_init(dev_priv, 0);
-	/* for old hardware without multimon only enable one display */
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		vmw_ldu_init(dev_priv, 1);
-		vmw_ldu_init(dev_priv, 2);
-		vmw_ldu_init(dev_priv, 3);
-		vmw_ldu_init(dev_priv, 4);
-		vmw_ldu_init(dev_priv, 5);
-		vmw_ldu_init(dev_priv, 6);
-		vmw_ldu_init(dev_priv, 7);
+		for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+			vmw_ldu_init(dev_priv, i);
+		ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+	} else {
+		/* for old hardware without multimon only enable one display */
+		vmw_ldu_init(dev_priv, 0);
+		ret = drm_vblank_init(dev, 1);
 	}
-	return 0;
+	return ret;
 }
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 {
+	struct drm_device *dev = dev_priv->dev;
+	drm_vblank_cleanup(dev);
 	if (!dev_priv->ldu_priv)
 		return -ENOSYS;
...
@@ -211,6 +211,7 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 	cmd->body.cid = cpu_to_le32(res->id);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv);
 }
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -247,6 +248,7 @@ static int vmw_context_init(struct vmw_private *dev_priv,
 	cmd->body.cid = cpu_to_le32(res->id);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	(void) vmw_3d_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
 }
@@ -406,6 +408,7 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res)
 	cmd->body.sid = cpu_to_le32(res->id);
 	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+	vmw_3d_resource_dec(dev_priv);
 }
 void vmw_surface_res_free(struct vmw_resource *res)
@@ -473,6 +476,7 @@ int vmw_surface_init(struct vmw_private *dev_priv,
 	}
 	vmw_fifo_commit(dev_priv, submit_size);
+	(void) vmw_3d_resource_inc(dev_priv);
 	vmw_resource_activate(res, vmw_hw_surface_destroy);
 	return 0;
 }
...
@@ -612,7 +612,7 @@ struct drm_gem_object {
 	struct kref refcount;
 	/** Handle count of this object. Each handle also holds a reference */
-	struct kref handlecount;
+	atomic_t handle_count; /* number of handles on this object */
 	/** Related drm device */
 	struct drm_device *dev;
@@ -808,7 +808,6 @@ struct drm_driver {
 	 */
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
-	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1175,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1455,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1483,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-	if (obj != NULL)
-		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+	if (obj != NULL) {
+		struct drm_device *dev = obj->dev;
+		mutex_lock(&dev->struct_mutex);
+		kref_put(&obj->refcount, drm_gem_object_free);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1495,7 +1498,7 @@ static inline void
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
 	drm_gem_object_reference(obj);
-	kref_get(&obj->handlecount);
+	atomic_inc(&obj->handle_count);
 }
 static inline void
@@ -1504,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 	if (obj == NULL)
 		return;
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
 	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference(obj);
 }
@@ -1519,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	if (obj == NULL)
 		return;
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference_unlocked(obj);
 }
...
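Note: with the drmP.h hunks above, the handle count is a plain atomic counter again: taking a handle is atomic_inc(), dropping one is atomic_dec_and_test() followed by drm_gem_object_handle_free() exactly once, and a count that is already zero is simply ignored. A userspace approximation of that pattern (C11 stdatomic instead of the kernel's atomic_t, hypothetical names):

#include <stdatomic.h>

struct example_object {
	atomic_uint handle_count;
};

static void example_handle_free(struct example_object *obj)
{
	/* drop any global name the object may carry */
}

static void example_handle_reference(struct example_object *obj)
{
	atomic_fetch_add(&obj->handle_count, 1);
}

static void example_handle_unreference(struct example_object *obj)
{
	if (atomic_load(&obj->handle_count) == 0)
		return;
	/* fetch_sub returns the old value, so 1 means this was the last handle */
	if (atomic_fetch_sub(&obj->handle_count, 1) == 1)
		example_handle_free(obj);
}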
@@ -85,7 +85,6 @@
 	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
-	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
@@ -103,6 +102,7 @@
 	{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
 	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
...