Commit 99013b10 authored by Chris Wilson

drm/i915: Make shrink/unshrink be atomic

Add an atomic counter and always take the spinlock around the pin/unpin
events, so that we can perform the list manipulation concurrently.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190910212204.17190-1-chris@chris-wilson.co.uk
parent 85dd14c2
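
Before the diff, here is a minimal userspace sketch of the scheme this patch introduces: a per-object atomic pin count takes a lock-free fast path whenever it is already away from the 0<->1 boundary, and only the first pin / last unpin takes the lock to move the object off or onto the shrink list. Everything below is illustrative, not i915 code: struct obj, make_unshrinkable()/make_shrinkable(), the pthread mutex standing in for mm.obj_lock and the bool standing in for mm.link are assumptions of the sketch, and the kernel helpers atomic_add_unless(), atomic_fetch_inc() and atomic_dec_and_test() are modelled with C11 atomics.

/* shrink_pin_sketch.c - build with: cc -pthread shrink_pin_sketch.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int shrink_pin;	/* >0: object is pinned, hidden from the shrinker */
	bool on_shrink_list;	/* stands in for mm.link; protected by list_lock */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Model of atomic_add_unless(v, a, u): add 'a' unless *v == u; true if added. */
static bool add_unless(atomic_int *v, int a, int u)
{
	int old = atomic_load(v);

	while (old != u)
		if (atomic_compare_exchange_weak(v, &old, old + a))
			return true;
	return false;
}

/* Only the first pin (0 -> 1) has to take the object off the shrink list. */
static void make_unshrinkable(struct obj *o)
{
	if (add_unless(&o->shrink_pin, 1, 0))
		return;	/* fast path: already pinned, list untouched */

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_add(&o->shrink_pin, 1) == 0 && o->on_shrink_list)
		o->on_shrink_list = false;	/* list_del_init() in the patch */
	pthread_mutex_unlock(&list_lock);
}

/* Only the last unpin (1 -> 0) puts the object back on the shrink list. */
static void make_shrinkable(struct obj *o)
{
	if (add_unless(&o->shrink_pin, -1, 1))
		return;	/* fast path: still pinned by someone else */

	pthread_mutex_lock(&list_lock);
	if (atomic_fetch_sub(&o->shrink_pin, 1) == 1 && !o->on_shrink_list)
		o->on_shrink_list = true;	/* list_add_tail() in the patch */
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct obj o = { .shrink_pin = 1, .on_shrink_list = false };

	make_shrinkable(&o);	/* last unpin: becomes visible to the shrinker */
	printf("after unpin, on list: %d\n", o.on_shrink_list);
	make_unshrinkable(&o);	/* first pin: hidden again */
	printf("after pin,   on list: %d\n", o.on_shrink_list);
	return 0;
}

Because the list is only touched on the 0<->1 transitions, and those transitions always happen under the lock, concurrent pin/unpin callers no longer have to serialise on a single mutex to keep the shrink list consistent.
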
@@ -494,7 +494,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
 
 		spin_lock_irqsave(&i915->mm.obj_lock, flags);
 
-		if (obj->mm.madv == I915_MADV_WILLNEED)
+		if (obj->mm.madv == I915_MADV_WILLNEED &&
+		    !atomic_read(&obj->mm.shrink_pin))
 			list_move_tail(&obj->mm.link, &i915->mm.shrink_list);
 
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
@@ -156,6 +156,7 @@ struct drm_i915_gem_object {
 	struct {
 		struct mutex lock; /* protects the pages and their use */
 		atomic_t pages_pin_count;
+		atomic_t shrink_pin;
 
 		struct sg_table *pages;
 		void *mapping;
@@ -71,6 +71,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 			list = &i915->mm.shrink_list;
 		list_add_tail(&obj->mm.link, list);
 
+		atomic_set(&obj->mm.shrink_pin, 0);
 		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
 }
@@ -516,46 +516,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
 void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
 {
+	struct drm_i915_private *i915 = obj_to_i915(obj);
+	unsigned long flags;
+
 	/*
 	 * We can only be called while the pages are pinned or when
 	 * the pages are released. If pinned, we should only be called
 	 * from a single caller under controlled conditions; and on release
 	 * only one caller may release us. Neither the two may cross.
 	 */
-	if (!list_empty(&obj->mm.link)) { /* pinned by caller */
-		struct drm_i915_private *i915 = obj_to_i915(obj);
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		GEM_BUG_ON(list_empty(&obj->mm.link));
-
+	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
+		return;
+
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
+	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
+	    !list_empty(&obj->mm.link)) {
 		list_del_init(&obj->mm.link);
 		i915->mm.shrink_count--;
 		i915->mm.shrink_memory -= obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
 					      struct list_head *head)
 {
+	struct drm_i915_private *i915 = obj_to_i915(obj);
+	unsigned long flags;
+
 	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
-	GEM_BUG_ON(!list_empty(&obj->mm.link));
+	if (!i915_gem_object_is_shrinkable(obj))
+		return;
 
-	if (i915_gem_object_is_shrinkable(obj)) {
-		struct drm_i915_private *i915 = obj_to_i915(obj);
-		unsigned long flags;
-
-		spin_lock_irqsave(&i915->mm.obj_lock, flags);
-		GEM_BUG_ON(!kref_read(&obj->base.refcount));
+	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
+		return;
 
+	spin_lock_irqsave(&i915->mm.obj_lock, flags);
+	GEM_BUG_ON(!kref_read(&obj->base.refcount));
+	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
+		GEM_BUG_ON(!list_empty(&obj->mm.link));
 		list_add_tail(&obj->mm.link, head);
 		i915->mm.shrink_count++;
 		i915->mm.shrink_memory += obj->base.size;
-
-		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 	}
+	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
 }
 
 void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
@@ -134,8 +134,8 @@ static int __context_pin_state(struct i915_vma *vma)
 
 static void __context_unpin_state(struct i915_vma *vma)
 {
-	__i915_vma_unpin(vma);
 	i915_vma_make_shrinkable(vma);
+	__i915_vma_unpin(vma);
 }
 
 static void __intel_context_retire(struct i915_active *active)