Commit 2d2cfc12 authored by Chris Wilson, committed by Jani Nikula

drm/i915: Recreate internal objects with single page segments if dmar fails

If we fail to dma-map the object, the most common cause is lack of space
inside the SW-IOTLB due to fragmentation. If we recreate the sg_table
using segments of PAGE_SIZE (and single page allocations), we may succeed
in remapping the scatterlist.
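
To illustrate the fallback flow in isolation, here is a minimal userspace sketch of the same retry pattern. It is not the driver code: get_pages(), try_dma_map() and the max_order starting value are hypothetical stand-ins for i915_gem_object_get_pages_internal(), i915_gem_gtt_prepare_pages() and MAX_ORDER; only the recreate-with-order-0-segments control flow mirrors the patch below.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the dma-mapping step: pretend a fragmented
 * SW-IOTLB rejects any segment larger than a single page.
 */
static bool try_dma_map(int order)
{
	return order == 0;
}

static int get_pages(unsigned int npages)
{
	int max_order = 10; /* hypothetical; the driver starts at MAX_ORDER */

create_st:
	/* (Re)build the table with segments of at most 2^max_order pages. */
	printf("building table for %u pages, max_order %d\n", npages, max_order);

	if (!try_dma_map(max_order)) {
		/* Mapping failed: throw the table away and retry once with
		 * single-page (order-0) segments before giving up for good.
		 */
		if (max_order > 0) {
			max_order = 0;
			goto create_st;
		}
		return -1;
	}
	return 0;
}

int main(void)
{
	return get_pages(64) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}

In the actual patch the same shape appears as the goto create_st retry taken when i915_gem_gtt_prepare_pages() fails on a multi-page segment.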

This first became a significant problem for the mock selftests after commit
5584f1b1 ("drm/i915: fix i915 running as dom0 under Xen") increased
the max_order.

Fixes: 920cf419 ("drm/i915: Introduce an internal allocator for disposable private objects")
Fixes: 5584f1b1 ("drm/i915: fix i915 running as dom0 under Xen")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170202132721.12711-1-chris@chris-wilson.co.uk
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Cc: <stable@vger.kernel.org> # v4.10
(cherry picked from commit bb96dcf5)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parent 33b7bfdf
drivers/gpu/drm/i915/i915_gem_internal.c
@@ -46,24 +46,12 @@ static struct sg_table *
 i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
-	unsigned int npages = obj->base.size / PAGE_SIZE;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	unsigned int npages;
 	int max_order;
 	gfp_t gfp;
 
-	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (!st)
-		return ERR_PTR(-ENOMEM);
-
-	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
-		kfree(st);
-		return ERR_PTR(-ENOMEM);
-	}
-
-	sg = st->sgl;
-	st->nents = 0;
-
 	max_order = MAX_ORDER;
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl()) /* minimum max swiotlb size is IO_TLB_SEGSIZE */
@@ -77,6 +65,20 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		gfp |= __GFP_DMA32;
 	}
 
+create_st:
+	st = kmalloc(sizeof(*st), GFP_KERNEL);
+	if (!st)
+		return ERR_PTR(-ENOMEM);
+
+	npages = obj->base.size / PAGE_SIZE;
+	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
+		kfree(st);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	sg = st->sgl;
+	st->nents = 0;
+
 	do {
 		int order = min(fls(npages) - 1, max_order);
 		struct page *page;
@@ -104,8 +106,15 @@ i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		sg = __sg_next(sg);
 	} while (1);
 
-	if (i915_gem_gtt_prepare_pages(obj, st))
+	if (i915_gem_gtt_prepare_pages(obj, st)) {
+		/* Failed to dma-map try again with single page sg segments */
+		if (get_order(st->sgl->length)) {
+			internal_free_pages(st);
+			max_order = 0;
+			goto create_st;
+		}
 		goto err;
+	}
 
 	/* Mark the pages as dontneed whilst they are still pinned. As soon
 	 * as they are unpinned they are allowed to be reaped by the shrinker,
...