Commit abd2f577 authored by Maarten Lankhorst, committed by Daniel Vetter

drm/i915: Flatten obj->mm.lock

With userptr fixed, there is no need for all the separate lockdep classes
anymore, and we can remove all the lockdep tricks that were used. A trylock
in the shrinker is all we need now to flatten the locking hierarchy.
Signed-off-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Reviewed-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
[danvet: Resolve conflict because we don't have the patch from Chris
to rebrand i915_gem_shrinker_taints_mutex to fs_reclaim_taints_mutex.
It's not a bad idea, but if we do it, it should be moved to the right
header. See
https://lore.kernel.org/intel-gfx/20210202154318.19246-1-chris@chris-wilson.co.uk/]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20210323155059.628690-18-maarten.lankhorst@linux.intel.com
parent ed29c269
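Before the diff itself, here is a rough illustration of the locking shape the commit message describes. This is only a sketch in plain C with pthreads and made-up names (demo_obj, demo_get_pages, demo_shrink), not i915 code: ordinary paths take the per-object lock outright, while the shrinker only trylocks it and skips busy objects, so no lockdep nesting annotations or subclasses are required.

/* Illustrative sketch of the flattened locking scheme, not i915 code.
 * Build with: cc demo.c -o demo -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_obj {
	pthread_mutex_t mm_lock;	/* plays the role of obj->mm.lock */
	bool has_pages;
};

/* Regular path (get_pages/pin_map style): simply take the lock. */
static void demo_get_pages(struct demo_obj *obj)
{
	pthread_mutex_lock(&obj->mm_lock);
	obj->has_pages = true;
	pthread_mutex_unlock(&obj->mm_lock);
}

/* Shrinker path: trylock only, so it never waits on a lock that is
 * already held by a thread that is itself inside reclaim. */
static bool demo_shrink(struct demo_obj *obj)
{
	if (pthread_mutex_trylock(&obj->mm_lock) != 0)
		return false;		/* lock is busy, skip this object */
	obj->has_pages = false;		/* drop the pages */
	pthread_mutex_unlock(&obj->mm_lock);
	return true;
}

int main(void)
{
	struct demo_obj obj = { .mm_lock = PTHREAD_MUTEX_INITIALIZER };

	demo_get_pages(&obj);
	printf("shrunk: %s\n", demo_shrink(&obj) ? "yes" : "no");
	return 0;
}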
drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -62,7 +62,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			  const struct drm_i915_gem_object_ops *ops,
 			  struct lock_class_key *key, unsigned flags)
 {
-	__mutex_init(&obj->mm.lock, ops->name ?: "obj->mm.lock", key);
+	mutex_init(&obj->mm.lock);
 
 	spin_lock_init(&obj->vma.lock);
 	INIT_LIST_HEAD(&obj->vma.list);
@@ -86,10 +86,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 	mutex_init(&obj->mm.get_page.lock);
 	INIT_RADIX_TREE(&obj->mm.get_dma_page.radix, GFP_KERNEL | __GFP_NOWARN);
 	mutex_init(&obj->mm.get_dma_page.lock);
-
-	if (IS_ENABLED(CONFIG_LOCKDEP) && i915_gem_object_is_shrinkable(obj))
-		i915_gem_shrinker_taints_mutex(to_i915(obj->base.dev),
-					       &obj->mm.lock);
 }
 
 /**
drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -346,27 +346,10 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 
-enum i915_mm_subclass { /* lockdep subclass for obj->mm.lock/struct_mutex */
-	I915_MM_NORMAL = 0,
-	/*
-	 * Only used by struct_mutex, when called "recursively" from
-	 * direct-reclaim-esque. Safe because there is only every one
-	 * struct_mutex in the entire system.
-	 */
-	I915_MM_SHRINKER = 1,
-	/*
-	 * Used for obj->mm.lock when allocating pages. Safe because the object
-	 * isn't yet on any LRU, and therefore the shrinker can't deadlock on
-	 * it. As soon as the object has pages, obj->mm.lock nests within
-	 * fs_reclaim.
-	 */
-	I915_MM_GET_PAGES = 1,
-};
-
 static inline int __must_check
 i915_gem_object_pin_pages(struct drm_i915_gem_object *obj)
 {
-	might_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	might_lock(&obj->mm.lock);
 
 	if (atomic_inc_not_zero(&obj->mm.pages_pin_count))
 		return 0;
@@ -410,6 +393,7 @@ i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 }
 
 int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj);
 void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
 void i915_gem_object_writeback(struct drm_i915_gem_object *obj);
drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -114,7 +114,7 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
 	int err;
 
-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
 	if (err)
 		return err;
@@ -196,21 +196,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
 	return pages;
 }
 
-int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+int __i915_gem_object_put_pages_locked(struct drm_i915_gem_object *obj)
 {
 	struct sg_table *pages;
-	int err;
 
 	if (i915_gem_object_has_pinned_pages(obj))
 		return -EBUSY;
 
-	/* May be called by shrinker from within get_pages() (on another bo) */
-	mutex_lock(&obj->mm.lock);
-	if (unlikely(atomic_read(&obj->mm.pages_pin_count))) {
-		err = -EBUSY;
-		goto unlock;
-	}
-
 	i915_gem_object_release_mmap_offset(obj);
 
 	/*
@@ -226,14 +218,22 @@ int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 	 * get_pages backends we should be better able to handle the
 	 * cancellation of the async task in a more uniform manner.
 	 */
-	if (!pages)
-		pages = ERR_PTR(-EINVAL);
-
-	if (!IS_ERR(pages))
+	if (!IS_ERR_OR_NULL(pages))
 		obj->ops->put_pages(obj, pages);
 
-	err = 0;
-unlock:
+	return 0;
+}
+
+int __i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
+{
+	int err;
+
+	if (i915_gem_object_has_pinned_pages(obj))
+		return -EBUSY;
+
+	/* May be called by shrinker from within get_pages() (on another bo) */
+	mutex_lock(&obj->mm.lock);
+	err = __i915_gem_object_put_pages_locked(obj);
 	mutex_unlock(&obj->mm.lock);
 
 	return err;
@@ -341,7 +341,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
 	    !i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_IOMEM))
 		return ERR_PTR(-ENXIO);
 
-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
 	if (err)
 		return ERR_PTR(err);
drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -236,7 +236,7 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
 	if (err)
 		return err;
 
-	err = mutex_lock_interruptible_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	err = mutex_lock_interruptible(&obj->mm.lock);
 	if (err)
 		goto err_unlock;
drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -49,9 +49,9 @@ static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
 		flags = I915_GEM_OBJECT_UNBIND_TEST;
 
 	if (i915_gem_object_unbind(obj, flags) == 0)
-		__i915_gem_object_put_pages(obj);
+		return true;
 
-	return !i915_gem_object_has_pages(obj);
+	return false;
 }
 
 static void try_to_writeback(struct drm_i915_gem_object *obj,
@@ -200,10 +200,10 @@ i915_gem_shrink(struct drm_i915_private *i915,
 			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 
-			if (unsafe_drop_pages(obj, shrink)) {
+			if (unsafe_drop_pages(obj, shrink) &&
+			    mutex_trylock(&obj->mm.lock)) {
 				/* May arrive from get_pages on another bo */
-				mutex_lock(&obj->mm.lock);
-				if (!i915_gem_object_has_pages(obj)) {
+				if (!__i915_gem_object_put_pages_locked(obj)) {
 					try_to_writeback(obj, shrink);
 					count += obj->base.size >> PAGE_SHIFT;
 				}
 				mutex_unlock(&obj->mm.lock);
drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -253,7 +253,7 @@ static int i915_gem_object_userptr_unbind(struct drm_i915_gem_object *obj, bool
 	if (GEM_WARN_ON(i915_gem_object_has_pinned_pages(obj)))
 		return -EBUSY;
 
-	mutex_lock_nested(&obj->mm.lock, I915_MM_GET_PAGES);
+	mutex_lock(&obj->mm.lock);
 
 	pages = __i915_gem_object_unset_pages(obj);
 	if (!IS_ERR_OR_NULL(pages))