Commit da494d7c authored by Daniel Vetter

drm/i915: disable shrinker lock stealing for create_mmap_offset

The mmap offset structure is not part of the drm/i915 code, but
provided by the gem helpers. To avoid leaky abstractions (by either
depending upon implementation details of said helper wrt
preallocations, or reimplementing it in our code and so fuzzing
around in internal details of that helper) simply disable
the shrinker lock stealing across calls into the helper functions.

This should fix igt/gem_tiled_swapping.

v2: Fix cleanup path confusion bemoaned by Chris Wilson.
Reported-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
parent 677feac2
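
Note on the consumer side of this flag (not part of the patch below): shrinker_no_lock_stealing is checked in the i915 shrinker's trylock fallback, which otherwise "steals" a struct_mutex already held by the current task during direct reclaim. The following is only an illustrative sketch; it assumes the lock-stealing logic and the mutex_is_locked_by() helper from the companion "borrow struct_mutex for direct reclaim" change, and the actual object counting/purging is elided.

    static int
    i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
    {
            struct drm_i915_private *dev_priv =
                    container_of(shrinker, struct drm_i915_private,
                                 mm.inactive_shrinker);
            struct drm_device *dev = dev_priv->dev;
            bool unlock = true;

            if (!mutex_trylock(&dev->struct_mutex)) {
                    /* struct_mutex is contended; we may only proceed if we
                     * hold it ourselves (direct reclaim from an i915 path). */
                    if (!mutex_is_locked_by(&dev->struct_mutex, current))
                            return 0;

                    /* create_mmap_offset has handed control to the gem
                     * helper: don't touch its preallocations, back off. */
                    if (dev_priv->mm.shrinker_no_lock_stealing)
                            return 0;

                    unlock = false;
            }

            /* ... purge purgeable objects / count inactive pages ... */

            if (unlock)
                    mutex_unlock(&dev->struct_mutex);
            return 0;
    }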
@@ -1517,9 +1517,11 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	if (obj->base.map_list.map)
 		return 0;
 
+	dev_priv->mm.shrinker_no_lock_stealing = true;
+
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	/* Badly fragmented mmap space? The only way we can recover
 	 * space is by destroying unwanted objects. We can't randomly release
@@ -1531,10 +1533,14 @@ static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 	i915_gem_purge(dev_priv, obj->base.size >> PAGE_SHIFT);
 	ret = drm_gem_create_mmap_offset(&obj->base);
 	if (ret != -ENOSPC)
-		return ret;
+		goto out;
 
 	i915_gem_shrink_all(dev_priv);
-	return drm_gem_create_mmap_offset(&obj->base);
+	ret = drm_gem_create_mmap_offset(&obj->base);
+out:
+	dev_priv->mm.shrinker_no_lock_stealing = false;
+
+	return ret;
 }
 
 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)