Commit e0ff7a7c authored by Chris Wilson

drm/i915: Early rejection of buffer allocations larger than RAM

We currently try to pin and allocate the whole buffer in one go. If that
object is larger than RAM, we will try to pin the whole of physical
memory, force the machine into OOM, and then still fail the allocation.

If the request is obviously too large, error out early. We opt to do
this check in the backend, which makes it easy to add alternate paths
that do not require the entire object to be pinned, or that can handle
proxy objects larger than physical memory.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180903083337.13134-4-chris@chris-wilson.co.uk
parent fddcd00a
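The guard added in the first hunk below is a straight comparison of the object's page count against the machine's total number of RAM pages, made before anything is pinned. A minimal user-space sketch of the same idea follows; the names check_alloc_fits_ram and object_size are hypothetical stand-ins for the driver's page_count vs. totalram_pages check, not part of the patch:

/*
 * Sketch only, not the driver's code: the same early-rejection idea
 * in portable user-space C. The kernel patch below compares
 * page_count against totalram_pages directly.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int check_alloc_fits_ram(uint64_t object_size)
{
        long page_size = sysconf(_SC_PAGESIZE);
        long ram_pages = sysconf(_SC_PHYS_PAGES);
        uint64_t page_count;

        if (page_size <= 0 || ram_pages <= 0)
                return 0; /* unknown; defer to the real allocator */

        page_count = (object_size + (uint64_t)page_size - 1) /
                     (uint64_t)page_size;

        /* No chance of allocating enough pages: bail before pinning. */
        if (page_count > (uint64_t)ram_pages)
                return -ENOMEM;

        return 0;
}

int main(void)
{
        /* A 1 TiB request is rejected immediately on most machines. */
        if (check_alloc_fits_ram((uint64_t)1 << 40) == -ENOMEM)
                printf("rejected: object larger than physical memory\n");
        return 0;
}

The point of rejecting before the first page is pinned is exactly what the commit message describes: once pinning starts, an oversized request can drive the machine into OOM before the allocation ultimately fails anyway.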
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2533,13 +2533,21 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         gfp_t noreclaim;
         int ret;
 
-        /* Assert that the object is not currently in any GPU domain. As it
+        /*
+         * Assert that the object is not currently in any GPU domain. As it
          * wasn't in the GTT, there shouldn't be any way it could have been in
          * a GPU cache
          */
         GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
         GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
 
+        /*
+         * If there's no chance of allocating enough pages for the whole
+         * object, bail early.
+         */
+        if (page_count > totalram_pages)
+                return -ENOMEM;
+
         st = kmalloc(sizeof(*st), GFP_KERNEL);
         if (st == NULL)
                 return -ENOMEM;
@@ -2550,7 +2558,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                 return -ENOMEM;
         }
 
-        /* Get the list of pages out of our struct file. They'll be pinned
+        /*
+         * Get the list of pages out of our struct file. They'll be pinned
          * at this point until we release them.
          *
          * Fail silently without starting the shrinker
@@ -2582,7 +2591,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                         i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
                         cond_resched();
 
-                        /* We've tried hard to allocate the memory by reaping
+                        /*
+                         * We've tried hard to allocate the memory by reaping
                          * our own buffer, now let the real VM do its job and
                          * go down in flames if truly OOM.
                          *
@@ -2594,7 +2604,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                                 /* reclaim and warn, but no oom */
                                 gfp = mapping_gfp_mask(mapping);
 
-                                /* Our bo are always dirty and so we require
+                                /*
+                                 * Our bo are always dirty and so we require
                                  * kswapd to reclaim our pages (direct reclaim
                                  * does not effectively begin pageout of our
                                  * buffers on its own). However, direct reclaim
@@ -2638,7 +2649,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
         ret = i915_gem_gtt_prepare_pages(obj, st);
         if (ret) {
-                /* DMA remapping failed? One possible cause is that
+                /*
+                 * DMA remapping failed? One possible cause is that
                  * it could not reserve enough large entries, asking
                  * for PAGE_SIZE chunks instead may be helpful.
                  */
@@ -2672,7 +2684,8 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         sg_free_table(st);
         kfree(st);
 
-        /* shmemfs first checks if there is enough memory to allocate the page
+        /*
+         * shmemfs first checks if there is enough memory to allocate the page
          * and reports ENOSPC should there be insufficient, along with the usual
          * ENOMEM for a genuine allocation failure.
          *