Commit dda9cd9f authored by Linus Torvalds

Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm: don't drop handle reference on unload
  drm/ttm: Fix two race conditions + fix busy codepaths
parents afe14746 dab8dcfa
@@ -238,8 +238,8 @@ int intel_fbdev_destroy(struct drm_device *dev,
         drm_framebuffer_cleanup(&ifb->base);
         if (ifb->obj) {
-                drm_gem_object_handle_unreference(ifb->obj);
                 drm_gem_object_unreference(ifb->obj);
+                ifb->obj = NULL;
         }
         return 0;
...
@@ -352,7 +352,6 @@ nouveau_fbcon_destroy(struct drm_device *dev, struct nouveau_fbdev *nfbdev)
         if (nouveau_fb->nvbo) {
                 nouveau_bo_unmap(nouveau_fb->nvbo);
-                drm_gem_object_handle_unreference_unlocked(nouveau_fb->nvbo->gem);
                 drm_gem_object_unreference_unlocked(nouveau_fb->nvbo->gem);
                 nouveau_fb->nvbo = NULL;
         }
...
@@ -79,7 +79,6 @@ nouveau_notifier_takedown_channel(struct nouveau_channel *chan)
         mutex_lock(&dev->struct_mutex);
         nouveau_bo_unpin(chan->notifier_bo);
         mutex_unlock(&dev->struct_mutex);
-        drm_gem_object_handle_unreference_unlocked(chan->notifier_bo->gem);
         drm_gem_object_unreference_unlocked(chan->notifier_bo->gem);
         drm_mm_takedown(&chan->notifier_heap);
 }
...
@@ -97,7 +97,6 @@ static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj)
                 radeon_bo_unpin(rbo);
                 radeon_bo_unreserve(rbo);
         }
-        drm_gem_object_handle_unreference(gobj);
         drm_gem_object_unreference_unlocked(gobj);
 }
...
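All four hunks above remove the same mistake: a driver teardown path dropping a GEM handle reference it never took, in addition to its own object reference. The sketch below is a minimal userspace model of that ownership rule, not the DRM GEM API; every name in it (struct object, obj_ref, handle_open and so on) is invented for illustration.

/*
 * Minimal userspace model (not the DRM GEM API) of the rule the hunks
 * above restore: a teardown path drops only the reference it took
 * itself.  The extra reference pinned by a handle is dropped by
 * handle_close(), never by the unload path.
 */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct object {
        int refcount;
};

static struct object *obj_create(void)
{
        struct object *obj = malloc(sizeof(*obj));
        obj->refcount = 1;              /* creator's reference */
        return obj;
}

static void obj_ref(struct object *obj)
{
        obj->refcount++;
}

static void obj_unref(struct object *obj)
{
        assert(obj->refcount > 0);      /* dropping a ref we never took would underflow here */
        if (--obj->refcount == 0) {
                printf("object freed\n");
                free(obj);
        }
}

/* A handle pins the object with its own reference ... */
static void handle_open(struct object *obj)
{
        obj_ref(obj);
}

/* ... and that reference is dropped only when the handle goes away. */
static void handle_close(struct object *obj)
{
        obj_unref(obj);
}

int main(void)
{
        struct object *fb = obj_create();       /* driver-internal framebuffer object */

        handle_open(fb);                        /* handle held elsewhere */

        /*
         * Driver unload: drop the creator's reference only.  Also dropping
         * the handle's reference here (as the removed *_handle_unreference
         * calls did) would free the object while the handle still points at
         * it, or trip the assert above once the handle is closed.
         */
        obj_unref(fb);

        handle_close(fb);                       /* last reference, object freed here */
        return 0;
}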
@@ -441,6 +441,43 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
         return ret;
 }
 
+/**
+ * Call with bo::reserved and with the lru lock held.
+ * Will release GPU memory type usage on destruction.
+ * This is the place to put in driver specific hooks.
+ * Will release the bo::reserved lock and the
+ * lru lock on exit.
+ */
+
+static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
+{
+        struct ttm_bo_global *glob = bo->glob;
+
+        if (bo->ttm) {
+                /**
+                 * Release the lru_lock, since we don't want to have
+                 * an atomic requirement on ttm_tt[unbind|destroy].
+                 */
+                spin_unlock(&glob->lru_lock);
+                ttm_tt_unbind(bo->ttm);
+                ttm_tt_destroy(bo->ttm);
+                bo->ttm = NULL;
+                spin_lock(&glob->lru_lock);
+        }
+
+        if (bo->mem.mm_node) {
+                drm_mm_put_block(bo->mem.mm_node);
+                bo->mem.mm_node = NULL;
+        }
+
+        atomic_set(&bo->reserved, 0);
+        wake_up_all(&bo->event_queue);
+        spin_unlock(&glob->lru_lock);
+}
+
 /**
  * If bo idle, remove from delayed- and lru lists, and unref.
  * If not idle, and already on delayed list, do nothing.
@@ -456,6 +493,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
         int ret;
 
         spin_lock(&bo->lock);
+retry:
         (void) ttm_bo_wait(bo, false, false, !remove_all);
 
         if (!bo->sync_obj) {
@@ -464,31 +502,52 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
                 spin_unlock(&bo->lock);
 
                 spin_lock(&glob->lru_lock);
-                put_count = ttm_bo_del_from_lru(bo);
-                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
-                BUG_ON(ret);
-                if (bo->ttm)
-                        ttm_tt_unbind(bo->ttm);
+                ret = ttm_bo_reserve_locked(bo, false, !remove_all, false, 0);
+
+                /**
+                 * Someone else has the object reserved. Bail and retry.
+                 */
+                if (unlikely(ret == -EBUSY)) {
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        goto requeue;
+                }
+
+                /**
+                 * We can re-check for sync object without taking
+                 * the bo::lock since setting the sync object requires
+                 * also bo::reserved. A busy object at this point may
+                 * be caused by another thread starting an accelerated
+                 * eviction.
+                 */
+                if (unlikely(bo->sync_obj)) {
+                        atomic_set(&bo->reserved, 0);
+                        wake_up_all(&bo->event_queue);
+                        spin_unlock(&glob->lru_lock);
+                        spin_lock(&bo->lock);
+                        if (remove_all)
+                                goto retry;
+                        else
+                                goto requeue;
+                }
+
+                put_count = ttm_bo_del_from_lru(bo);
 
                 if (!list_empty(&bo->ddestroy)) {
                         list_del_init(&bo->ddestroy);
                         ++put_count;
                 }
-                if (bo->mem.mm_node) {
-                        drm_mm_put_block(bo->mem.mm_node);
-                        bo->mem.mm_node = NULL;
-                }
-                spin_unlock(&glob->lru_lock);
-                atomic_set(&bo->reserved, 0);
+
+                ttm_bo_cleanup_memtype_use(bo);
 
                 while (put_count--)
                         kref_put(&bo->list_kref, ttm_bo_ref_bug);
 
                 return 0;
         }
+requeue:
         spin_lock(&glob->lru_lock);
         if (list_empty(&bo->ddestroy)) {
                 void *sync_obj = bo->sync_obj;
...
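In the ttm_bo.c hunks above, the cleanup path no longer assumes the reservation always succeeds (the old BUG_ON(ret)); it try-reserves, and when another thread holds the reservation it bails out and requeues the buffer for delayed destruction. Below is a small userspace sketch of that try-lock-and-requeue pattern using plain pthreads; it is not the TTM API and all names in it are invented.

/*
 * Userspace sketch of the busy path: try to take the reservation, and if
 * someone else holds it, back off and retry later instead of asserting.
 * Build with: cc -pthread busy.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct buffer {
        pthread_mutex_t reserve;        /* stands in for bo::reserved */
        int destroyed;
};

/* One cleanup attempt: succeed only if nobody else holds the reservation. */
static int cleanup_attempt(struct buffer *bo)
{
        if (pthread_mutex_trylock(&bo->reserve) == EBUSY)
                return -EBUSY;          /* bail; caller requeues and retries */

        bo->destroyed = 1;              /* resources would be released here */
        pthread_mutex_unlock(&bo->reserve);
        return 0;
}

static void *other_user(void *arg)
{
        struct buffer *bo = arg;

        /* Hold the reservation for a while, like a concurrent eviction. */
        pthread_mutex_lock(&bo->reserve);
        usleep(100 * 1000);
        pthread_mutex_unlock(&bo->reserve);
        return NULL;
}

int main(void)
{
        struct buffer bo = { .reserve = PTHREAD_MUTEX_INITIALIZER };
        pthread_t t;

        pthread_create(&t, NULL, other_user, &bo);
        usleep(10 * 1000);              /* let the other thread grab the reservation */

        while (cleanup_attempt(&bo) == -EBUSY) {
                printf("busy, requeueing cleanup\n");
                usleep(20 * 1000);      /* "delayed destroy" retry */
        }
        printf("destroyed = %d\n", bo.destroyed);

        pthread_join(t, NULL);
        return 0;
}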
@@ -246,9 +246,11 @@ struct ttm_buffer_object {
         atomic_t reserved;
 
         /**
          * Members protected by the bo::lock
+         * In addition, setting sync_obj to anything else
+         * than NULL requires bo::reserved to be held. This allows for
+         * checking NULL while reserved but not holding bo::lock.
          */
 
         void *sync_obj_arg;
...
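The ttm_bo_api.h hunk above documents the invariant the new busy path depends on: sync_obj may only be set to a non-NULL value with both bo::lock and bo::reserved held, so a reader holding just the reservation can check it for NULL without taking bo::lock. A minimal userspace illustration of that writers-take-both-locks, readers-take-either rule, again with invented names and plain pthreads rather than the TTM API:

/*
 * If writers must hold BOTH locks to change a field, a reader holding
 * EITHER lock sees a stable value.  Build with: cc -pthread invariant.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fence_lock = PTHREAD_MUTEX_INITIALIZER; /* plays bo::lock     */
static pthread_mutex_t reserve    = PTHREAD_MUTEX_INITIALIZER; /* plays bo::reserved */
static void *sync_obj;                                         /* the protected field */

/* Writer: changing sync_obj requires both locks. */
static void publish_fence(void *fence)
{
        pthread_mutex_lock(&reserve);
        pthread_mutex_lock(&fence_lock);
        sync_obj = fence;
        pthread_mutex_unlock(&fence_lock);
        pthread_mutex_unlock(&reserve);
}

/* Reader: holding just the reservation is enough for a consistent check. */
static int idle_while_reserved(void)
{
        int idle;

        pthread_mutex_lock(&reserve);
        idle = (sync_obj == NULL);      /* no writer can race us here */
        pthread_mutex_unlock(&reserve);
        return idle;
}

int main(void)
{
        int dummy_fence;

        printf("idle before publish: %d\n", idle_while_reserved());
        publish_fence(&dummy_fence);
        printf("idle after publish:  %d\n", idle_while_reserved());
        return 0;
}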