Commit 8c949515 authored by Matthew Auld

drm/i915: use i915_sg_dma_sizes() for all backends

We rely on page_sizes.sg in setup_scratch_page() reporting the correct
value if the underlying sgl is not contiguous, however in
get_pages_internal() we are only looking at the layout of the created
pages when calculating the sg_page_sizes, and not the final sgl, which
could in theory be completely different. In such a situation we might
incorrectly think we have a 64K scratch page, when it is actually only
4K (or similar) split over multiple non-contiguous entries, which could
lead to broken behaviour when touching the scratch space within the
padding of a 64K GTT page-table. For most of the other backends we
already just call i915_sg_dma_sizes() on the final mapping, so rather
just move that into __i915_gem_object_set_pages() to avoid such issues
coming back to bite us later.

v2: Update missing conversion in gvt
Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Stuart Summers <stuart.summers@intel.com>
Cc: Andrzej Hajda <andrzej.hajda@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221108103238.165447-1-matthew.auld@intel.com
parent ccb0e027
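
For context on the fix below: i915_sg_dma_sizes() derives a mask of page sizes by OR-ing the DMA length of every segment in the *final* scatterlist, so a fragmented mapping can never masquerade as a contiguous 64K one. A minimal standalone sketch of that idea follows; struct seg, sg_dma_sizes() and the sample addresses are illustrative stand-ins, not the kernel's actual types or values.

#include <stdio.h>

/* Illustrative stand-in for one coalesced DMA segment of a scatterlist. */
struct seg {
	unsigned long long dma_addr;	/* bus address of the segment */
	unsigned int dma_len;		/* segment length, page aligned */
};

/*
 * Sketch of the i915_sg_dma_sizes() idea: OR together the lengths of
 * the segments in the final DMA mapping. A contiguous 64K segment
 * contributes 0x10000 to the mask; the same span split into scattered
 * 4K segments only ever contributes 0x1000.
 */
static unsigned int sg_dma_sizes(const struct seg *segs, int n)
{
	unsigned int page_sizes = 0;
	int i;

	for (i = 0; i < n; i++)
		page_sizes |= segs[i].dma_len;

	return page_sizes;
}

int main(void)
{
	struct seg contig[] = { { 0x100000, 0x10000 } };
	struct seg scattered[] = {
		{ 0x100000, 0x1000 }, { 0x200000, 0x1000 },
		{ 0x300000, 0x1000 }, { 0x400000, 0x1000 },
	};

	printf("contiguous: %#x\n", sg_dma_sizes(contig, 1));   /* 0x10000 */
	printf("scattered:  %#x\n", sg_dma_sizes(scattered, 4)); /* 0x1000 */
	return 0;
}

Computing this mask inside __i915_gem_object_set_pages(), from the table the backend actually hands over, is what closes the gap described above: the reported sizes can no longer come from a different layout than the one that gets mapped.
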
@@ -238,7 +238,6 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *sgt;
-	unsigned int sg_page_sizes;
 
 	assert_object_held(obj);
 
@@ -262,8 +261,7 @@ static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 	    (!HAS_LLC(i915) && !IS_DG1(i915)))
 		wbinvd_on_all_cpus();
 
-	sg_page_sizes = i915_sg_dma_sizes(sgt->sgl);
-	__i915_gem_object_set_pages(obj, sgt, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, sgt);
 
 	return 0;
 }

@@ -35,7 +35,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_page_sizes;
 	unsigned int npages;
 	int max_order = MAX_ORDER;
 	unsigned int max_segment;
@@ -64,7 +63,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 	sg = st->sgl;
 	st->nents = 0;
-	sg_page_sizes = 0;
 
 	do {
 		int order = min(fls(npages) - 1, max_order);
@@ -83,7 +81,6 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		} while (1);
 
 		sg_set_page(sg, page, PAGE_SIZE << order, 0);
-		sg_page_sizes |= PAGE_SIZE << order;
 		st->nents++;
 
 		npages -= 1 << order;
@@ -105,7 +102,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
 		goto err;
 	}
 
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;

@@ -403,8 +403,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 					unsigned long n);
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes);
+				 struct sg_table *pages);
 
 int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);

@@ -16,8 +16,7 @@
 #include "i915_gem_mman.h"
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-				 struct sg_table *pages,
-				 unsigned int sg_page_sizes)
+				 struct sg_table *pages)
 {
 	struct drm_i915_private *i915 = to_i915(obj->base.dev);
 	unsigned long supported = RUNTIME_INFO(i915)->page_sizes;
@@ -45,8 +44,8 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
 	obj->mm.pages = pages;
 
-	GEM_BUG_ON(!sg_page_sizes);
-	obj->mm.page_sizes.phys = sg_page_sizes;
+	obj->mm.page_sizes.phys = i915_sg_dma_sizes(pages->sgl);
+	GEM_BUG_ON(!obj->mm.page_sizes.phys);
 
 	/*
 	 * Calculate the supported page-sizes which fit into the given

@@ -79,7 +79,7 @@ static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 	/* We're no longer struct page backed */
 	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
-	__i915_gem_object_set_pages(obj, st, sg->length);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
@@ -209,11 +209,8 @@ static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
 	return 0;
 
 err_xfer:
-	if (!IS_ERR_OR_NULL(pages)) {
-		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);
-
-		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
-	}
+	if (!IS_ERR_OR_NULL(pages))
+		__i915_gem_object_set_pages(obj, pages);
 	return err;
 }

@@ -247,7 +247,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_can_bypass_llc(obj))
 		obj->cache_dirty = true;
 
-	__i915_gem_object_set_pages(obj, st, i915_sg_dma_sizes(st->sgl));
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;

@@ -628,7 +628,7 @@ static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
 			  sg_dma_len(pages->sgl),
 			  POISON_INUSE);
 
-	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;
 }

@@ -815,8 +815,7 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		GEM_BUG_ON(obj->mm.rsgt);
 		obj->mm.rsgt = rsgt;
-		__i915_gem_object_set_pages(obj, &rsgt->table,
-					    i915_sg_dma_sizes(rsgt->table.sgl));
+		__i915_gem_object_set_pages(obj, &rsgt->table);
 	}
 
 	GEM_BUG_ON(bo->ttm && ((obj->base.size >> PAGE_SHIFT) < bo->ttm->num_pages));

@@ -131,7 +131,6 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	const unsigned long num_pages = obj->base.size >> PAGE_SHIFT;
 	unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev);
 	struct sg_table *st;
-	unsigned int sg_page_sizes;
 	struct page **pvec;
 	int ret;
@@ -170,8 +169,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_object_can_bypass_llc(obj))
 		obj->cache_dirty = true;
 
-	sg_page_sizes = i915_sg_dma_sizes(st->sgl);
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;

@@ -68,7 +68,7 @@ static int huge_get_pages(struct drm_i915_gem_object *obj)
 	if (i915_gem_gtt_prepare_pages(obj, pages))
 		goto err;
 
-	__i915_gem_object_set_pages(obj, pages, PAGE_SIZE);
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;

@@ -136,7 +136,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
 		goto err;
 
 	GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
@@ -210,7 +210,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	const u64 max_len = rounddown_pow_of_two(UINT_MAX);
 	struct sg_table *st;
 	struct scatterlist *sg;
-	unsigned int sg_page_sizes;
 	u64 rem;
 
 	st = kmalloc(sizeof(*st), GFP);
@@ -226,7 +225,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	rem = obj->base.size;
 	sg = st->sgl;
 	st->nents = 0;
-	sg_page_sizes = 0;
 	do {
 		unsigned int page_size = get_largest_page_size(i915, rem);
 		unsigned int len = min(page_size * div_u64(rem, page_size),
@@ -239,8 +237,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 		sg_dma_len(sg) = len;
 		sg_dma_address(sg) = page_size;
 
-		sg_page_sizes |= len;
-
 		st->nents++;
 
 		rem -= len;
@@ -254,7 +250,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
 	i915_sg_trim(st);
 
-	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 }
@@ -286,7 +282,7 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj)
 	sg_dma_len(sg) = obj->base.size;
 	sg_dma_address(sg) = page_size;
 
-	__i915_gem_object_set_pages(obj, st, sg->length);
+	__i915_gem_object_set_pages(obj, st);
 
 	return 0;
 #undef GFP

@@ -88,7 +88,7 @@ static int vgpu_gem_get_pages(
 		sg_dma_address(sg) = dma_addr;
 	}
 
-	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);
+	__i915_gem_object_set_pages(obj, st);
 out:
 	if (ret) {
 		dma_addr_t dma_addr;

@@ -61,7 +61,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 #define PFN_BIAS 0x1000
 	struct sg_table *pages;
 	struct scatterlist *sg;
-	unsigned int sg_page_sizes;
 	typeof(obj->base.size) rem;
 
 	pages = kmalloc(sizeof(*pages), GFP);
@@ -74,7 +73,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 		return -ENOMEM;
 	}
 
-	sg_page_sizes = 0;
 	rem = obj->base.size;
 	for (sg = pages->sgl; sg; sg = sg_next(sg)) {
 		unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -83,13 +81,12 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 		sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
 		sg_dma_address(sg) = page_to_phys(sg_page(sg));
 		sg_dma_len(sg) = len;
-		sg_page_sizes |= len;
 
 		rem -= len;
 	}
 	GEM_BUG_ON(rem);
 
-	__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;
 #undef GFP

@@ -41,7 +41,7 @@ static int mock_region_get_pages(struct drm_i915_gem_object *obj)
 	}
 
 	pages = &obj->mm.rsgt->table;
-	__i915_gem_object_set_pages(obj, pages, i915_sg_dma_sizes(pages->sgl));
+	__i915_gem_object_set_pages(obj, pages);
 
 	return 0;