Commit 7d1a31e1 authored by Chris Wilson

Revert "drm/i915/lmem: Limit block size to 4G"

Mixing I915_ALLOC_CONTIGUOUS and I915_ALLOC_MAX_SEGMENT_SIZE fared
badly. The two directives conflict, with the contiguous request setting
the min_order to the full size of the object, and the max-segment-size
setting the max_order to the limit of the DMA mapper. This results in a
situation where max_order < min_order, causing our sanity checks to
fail.

Instead of limiting the buddy block size, in the previous patch we split
the oversized buddy into multiple scatterlist elements.

Fixes: d2cf0125 ("drm/i915/lmem: Limit block size to 4G")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Niranjana Vishwanathapura <niranjana.vishwanathapura@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201202173444.14903-2-chris@chris-wilson.co.uk
parent a2843b3b
...@@ -43,7 +43,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) ...@@ -43,7 +43,7 @@ i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj)
return -ENOMEM; return -ENOMEM;
} }
flags = I915_ALLOC_MIN_PAGE_SIZE | I915_ALLOC_MAX_SEGMENT_SIZE; flags = I915_ALLOC_MIN_PAGE_SIZE;
if (obj->flags & I915_BO_ALLOC_CONTIGUOUS) if (obj->flags & I915_BO_ALLOC_CONTIGUOUS)
flags |= I915_ALLOC_CONTIGUOUS; flags |= I915_ALLOC_CONTIGUOUS;
......
...@@ -72,7 +72,6 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, ...@@ -72,7 +72,6 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
struct list_head *blocks) struct list_head *blocks)
{ {
unsigned int min_order = 0; unsigned int min_order = 0;
unsigned int max_order;
unsigned long n_pages; unsigned long n_pages;
GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size)); GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size));
...@@ -93,28 +92,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, ...@@ -93,28 +92,13 @@ __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem,
n_pages = size >> ilog2(mem->mm.chunk_size); n_pages = size >> ilog2(mem->mm.chunk_size);
/*
* If we going to feed this into an sg list we should limit the block
* sizes such that we don't exceed the i915_sg_segment_size().
*/
if (flags & I915_ALLOC_MAX_SEGMENT_SIZE) {
unsigned int max_segment = i915_sg_segment_size();
if (GEM_WARN_ON(max_segment < mem->mm.chunk_size))
max_order = 0;
else
max_order = ilog2(max_segment) - ilog2(mem->mm.chunk_size);
} else {
max_order = mem->mm.max_order;
}
mutex_lock(&mem->mm_lock); mutex_lock(&mem->mm_lock);
do { do {
struct i915_buddy_block *block; struct i915_buddy_block *block;
unsigned int order; unsigned int order;
order = min_t(u32, fls(n_pages) - 1, max_order); order = fls(n_pages) - 1;
GEM_BUG_ON(order > mem->mm.max_order); GEM_BUG_ON(order > mem->mm.max_order);
GEM_BUG_ON(order < min_order); GEM_BUG_ON(order < min_order);
......
...@@ -46,7 +46,6 @@ enum intel_region_id { ...@@ -46,7 +46,6 @@ enum intel_region_id {
#define I915_ALLOC_MIN_PAGE_SIZE BIT(0) #define I915_ALLOC_MIN_PAGE_SIZE BIT(0)
#define I915_ALLOC_CONTIGUOUS BIT(1) #define I915_ALLOC_CONTIGUOUS BIT(1)
#define I915_ALLOC_MAX_SEGMENT_SIZE BIT(2)
#define for_each_memory_region(mr, i915, id) \ #define for_each_memory_region(mr, i915, id) \
for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \ for (id = 0; id < ARRAY_SIZE((i915)->mm.regions); id++) \
......
...@@ -356,21 +356,21 @@ static int igt_mock_splintered_region(void *arg) ...@@ -356,21 +356,21 @@ static int igt_mock_splintered_region(void *arg)
static int igt_mock_max_segment(void *arg) static int igt_mock_max_segment(void *arg)
{ {
const unsigned int max_segment = i915_sg_segment_size();
struct intel_memory_region *mem = arg; struct intel_memory_region *mem = arg;
struct drm_i915_private *i915 = mem->i915; struct drm_i915_private *i915 = mem->i915;
struct drm_i915_gem_object *obj; struct drm_i915_gem_object *obj;
struct i915_buddy_block *block; struct i915_buddy_block *block;
struct scatterlist *sg;
LIST_HEAD(objects); LIST_HEAD(objects);
u64 size; u64 size;
int err = 0; int err = 0;
/* /*
* The size of block are only limited by the largest power-of-two that * While we may create very large contiguous blocks, we may need
* will fit in the region size, but to construct an object we also * to break those down for consumption elsewhere. In particular,
* require feeding it into an sg list, where the upper limit of the sg * dma-mapping with scatterlist elements have an implicit limit of
* entry is at most UINT_MAX, therefore when allocating with * UINT_MAX on each element.
* I915_ALLOC_MAX_SEGMENT_SIZE we shouldn't see blocks larger than
* i915_sg_segment_size().
*/ */
size = SZ_8G; size = SZ_8G;
...@@ -384,12 +384,23 @@ static int igt_mock_max_segment(void *arg) ...@@ -384,12 +384,23 @@ static int igt_mock_max_segment(void *arg)
goto out_put; goto out_put;
} }
err = -EINVAL;
list_for_each_entry(block, &obj->mm.blocks, link) { list_for_each_entry(block, &obj->mm.blocks, link) {
if (i915_buddy_block_size(&mem->mm, block) > i915_sg_segment_size()) { if (i915_buddy_block_size(&mem->mm, block) > max_segment) {
pr_err("%s found block size(%llu) larger than max sg_segment_size(%u)", err = 0;
__func__, break;
i915_buddy_block_size(&mem->mm, block), }
i915_sg_segment_size()); }
if (err) {
pr_err("%s: Failed to create a huge contiguous block\n",
__func__);
goto out_close;
}
for (sg = obj->mm.pages->sgl; sg; sg = sg_next(sg)) {
if (sg->length > max_segment) {
pr_err("%s: Created an oversized scatterlist entry, %u > %u\n",
__func__, sg->length, max_segment);
err = -EINVAL; err = -EINVAL;
goto out_close; goto out_close;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment