Commit 84e8978e authored by Matthew Auld, committed by Chris Wilson

drm/i915: s/sg_mask/sg_page_sizes/

It's a little unclear what the sg_mask actually is, so prefer the more
meaningful name of sg_page_sizes.
Suggested-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Matthew Auld <matthew.auld@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20171009110024.29114-1-matthew.auld@intel.com
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
parent 43ae70d9
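
For context on the rename: the value being passed to __i915_gem_object_set_pages() is the bitwise OR of the scatterlist segment lengths, so it is really a set of page sizes backing the object rather than an opaque "mask". A small standalone sketch (plain C, illustration only, not part of the patch):

#include <stdio.h>

/*
 * Illustration only: OR-ing power-of-two segment lengths produces a value
 * whose set bits record which page sizes appear in the object's backing
 * store, which is what the new name sg_page_sizes conveys.
 */
int main(void)
{
	const unsigned int seg_lengths[] = {
		1u << 21,	/* 2M segment  */
		1u << 21,	/* 2M segment  */
		1u << 16,	/* 64K segment */
		1u << 12,	/* 4K segment  */
	};
	unsigned int sg_page_sizes = 0;
	unsigned int i;

	for (i = 0; i < sizeof(seg_lengths) / sizeof(seg_lengths[0]); i++)
		sg_page_sizes |= seg_lengths[i];

	/* Prints 0x211000: bits 21, 16 and 12, i.e. 2M, 64K and 4K pages. */
	printf("sg_page_sizes = %#x\n", sg_page_sizes);
	return 0;
}
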
@@ -3537,7 +3537,7 @@ i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages,
-                                 unsigned int sg_mask);
+                                 unsigned int sg_page_sizes);
 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline int __must_check
...
@@ -2333,7 +2333,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         struct page *page;
         unsigned long last_pfn = 0; /* suppress gcc warning */
         unsigned int max_segment = i915_sg_segment_size();
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         gfp_t noreclaim;
         int ret;
@@ -2365,7 +2365,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         for (i = 0; i < page_count; i++) {
                 const unsigned int shrink[] = {
                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2419,7 +2419,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                     sg->length >= max_segment ||
                     page_to_pfn(page) != last_pfn + 1) {
                         if (i) {
-                                sg_mask |= sg->length;
+                                sg_page_sizes |= sg->length;
                                 sg = sg_next(sg);
                         }
                         st->nents++;
@@ -2433,7 +2433,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
         }
         if (sg) { /* loop terminated early; short sg table */
-                sg_mask |= sg->length;
+                sg_page_sizes |= sg->length;
                 sg_mark_end(sg);
         }
@@ -2464,7 +2464,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
         if (i915_gem_object_needs_bit17_swizzle(obj))
                 i915_gem_object_do_bit_17_swizzle(obj, st);
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
         return 0;
@@ -2492,7 +2492,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages,
-                                 unsigned int sg_mask)
+                                 unsigned int sg_page_sizes)
 {
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         unsigned long supported = INTEL_INFO(i915)->page_sizes;
@@ -2512,16 +2512,16 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
                 obj->mm.quirked = true;
         }
-        GEM_BUG_ON(!sg_mask);
-        obj->mm.page_sizes.phys = sg_mask;
+        GEM_BUG_ON(!sg_page_sizes);
+        obj->mm.page_sizes.phys = sg_page_sizes;
         /*
-         * Calculate the supported page-sizes which fit into the given sg_mask.
-         * This will give us the page-sizes which we may be able to use
-         * opportunistically when later inserting into the GTT. For example if
-         * phys=2G, then in theory we should be able to use 1G, 2M, 64K or 4K
-         * pages, although in practice this will depend on a number of other
-         * factors.
+         * Calculate the supported page-sizes which fit into the given
+         * sg_page_sizes. This will give us the page-sizes which we may be able
+         * to use opportunistically when later inserting into the GTT. For
+         * example if phys=2G, then in theory we should be able to use 1G, 2M,
+         * 64K or 4K pages, although in practice this will depend on a number of
+         * other factors.
          */
         obj->mm.page_sizes.sg = 0;
         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
...
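
The hunk above ends before the body of the for_each_set_bit() loop. As a worked version of the phys=2G example in the comment, here is a minimal standalone sketch; the "fits" test used here (any segment at least as large as the candidate page size) is an assumption for illustration, not necessarily the kernel's exact check:

#include <stdio.h>

int main(void)
{
	/* Supported page sizes: 4K, 64K, 2M and 1G (bits 12, 16, 21, 30). */
	const unsigned long supported =
		(1ul << 12) | (1ul << 16) | (1ul << 21) | (1ul << 30);
	const unsigned int phys = 1u << 31;	/* one 2G physical segment */
	unsigned int sg = 0;
	unsigned int i;

	for (i = 0; i < 32; i++) {
		if (!(supported & (1ul << i)))
			continue;
		/* A page size "fits" if some segment is at least that big. */
		if (phys & (~0u << i))
			sg |= 1u << i;
	}

	/* Prints 0x40211000: 1G, 2M, 64K and 4K all fit into phys = 2G. */
	printf("page_sizes.sg = %#x\n", sg);
	return 0;
}
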
@@ -259,16 +259,16 @@ struct dma_buf *i915_gem_prime_export(struct drm_device *dev,
 static int i915_gem_object_get_pages_dmabuf(struct drm_i915_gem_object *obj)
 {
         struct sg_table *pages;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         pages = dma_buf_map_attachment(obj->base.import_attach,
                                        DMA_BIDIRECTIONAL);
         if (IS_ERR(pages))
                 return PTR_ERR(pages);
-        sg_mask = i915_sg_page_sizes(pages->sgl);
+        sg_page_sizes = i915_sg_page_sizes(pages->sgl);
-        __i915_gem_object_set_pages(obj, pages, sg_mask);
+        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
         return 0;
 }
...
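
For imported dma-buf objects the scatterlist is built elsewhere, so the page-size mask is recovered afterwards via i915_sg_page_sizes(). A rough, standalone sketch of the idea (the fake_sg type and helper below are stand-ins for illustration, not the driver's implementation):

#include <stdio.h>

/* Stand-in for struct scatterlist, just enough for this illustration. */
struct fake_sg {
	unsigned int length;
	struct fake_sg *next;
};

/*
 * Sketch: when the sg table was built by someone else, the page-size set
 * can still be derived afterwards by OR-ing the segment lengths.
 */
static unsigned int fake_sg_page_sizes(const struct fake_sg *sg)
{
	unsigned int page_sizes = 0;

	for (; sg; sg = sg->next)
		page_sizes |= sg->length;

	return page_sizes;
}

int main(void)
{
	struct fake_sg c = { 1u << 12, NULL };	/* 4K */
	struct fake_sg b = { 1u << 21, &c };	/* 2M */
	struct fake_sg a = { 1u << 21, &b };	/* 2M */

	/* Prints 0x201000: the chain contains 2M and 4K segments. */
	printf("page sizes = %#x\n", fake_sg_page_sizes(&a));
	return 0;
}
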
@@ -49,7 +49,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
         struct drm_i915_private *i915 = to_i915(obj->base.dev);
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         unsigned int npages;
         int max_order;
         gfp_t gfp;
@@ -88,7 +88,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         do {
                 int order = min(fls(npages) - 1, max_order);
@@ -106,7 +106,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
                 } while (1);
                 sg_set_page(sg, page, PAGE_SIZE << order, 0);
-                sg_mask |= PAGE_SIZE << order;
+                sg_page_sizes |= PAGE_SIZE << order;
                 st->nents++;
                 npages -= 1 << order;
@@ -135,7 +135,7 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
          */
         obj->mm.madv = I915_MADV_DONTNEED;
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
         return 0;
...
@@ -405,7 +405,7 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
 {
         unsigned int max_segment = i915_sg_segment_size();
         struct sg_table *st;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         int ret;
         st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -435,9 +435,9 @@ __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj,
                 return ERR_PTR(ret);
         }
-        sg_mask = i915_sg_page_sizes(st->sgl);
+        sg_page_sizes = i915_sg_page_sizes(st->sgl);
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
         return st;
 }
...
@@ -68,7 +68,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
         unsigned int page_mask = obj->mm.page_mask;
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         u64 rem;
         st = kmalloc(sizeof(*st), GFP);
@@ -83,7 +83,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
         rem = obj->base.size;
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         /*
          * Our goal here is simple, we want to greedily fill the object from
@@ -104,7 +104,7 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
                         goto err;
                 sg_set_page(sg, page, page_size, 0);
-                sg_mask |= page_size;
+                sg_page_sizes |= page_size;
                 st->nents++;
                 rem -= page_size;
@@ -124,8 +124,8 @@ static int get_huge_pages(struct drm_i915_gem_object *obj)
         obj->mm.madv = I915_MADV_DONTNEED;
-        GEM_BUG_ON(sg_mask != obj->mm.page_mask);
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
         return 0;
@@ -192,7 +192,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
         const u64 max_len = rounddown_pow_of_two(UINT_MAX);
         struct sg_table *st;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         u64 rem;
         st = kmalloc(sizeof(*st), GFP);
@@ -208,7 +208,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
         rem = obj->base.size;
         sg = st->sgl;
         st->nents = 0;
-        sg_mask = 0;
+        sg_page_sizes = 0;
         do {
                 unsigned int page_size = get_largest_page_size(i915, rem);
                 unsigned int len = min(page_size * div_u64(rem, page_size),
@@ -221,7 +221,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
                 sg_dma_len(sg) = len;
                 sg_dma_address(sg) = page_size;
-                sg_mask |= len;
+                sg_page_sizes |= len;
                 st->nents++;
@@ -236,7 +236,7 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj)
         obj->mm.madv = I915_MADV_DONTNEED;
-        __i915_gem_object_set_pages(obj, st, sg_mask);
+        __i915_gem_object_set_pages(obj, st, sg_page_sizes);
         return 0;
 }
...
@@ -45,7 +45,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
 #define PFN_BIAS 0x1000
         struct sg_table *pages;
         struct scatterlist *sg;
-        unsigned int sg_mask;
+        unsigned int sg_page_sizes;
         typeof(obj->base.size) rem;
         pages = kmalloc(sizeof(*pages), GFP);
@@ -58,7 +58,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
                 return -ENOMEM;
         }
-        sg_mask = 0;
+        sg_page_sizes = 0;
         rem = obj->base.size;
         for (sg = pages->sgl; sg; sg = sg_next(sg)) {
                 unsigned long len = min_t(typeof(rem), rem, BIT(31));
@@ -67,7 +67,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
                 sg_set_page(sg, pfn_to_page(PFN_BIAS), len, 0);
                 sg_dma_address(sg) = page_to_phys(sg_page(sg));
                 sg_dma_len(sg) = len;
-                sg_mask |= len;
+                sg_page_sizes |= len;
                 rem -= len;
         }
@@ -75,7 +75,7 @@ static int fake_get_pages(struct drm_i915_gem_object *obj)
         obj->mm.madv = I915_MADV_DONTNEED;
-        __i915_gem_object_set_pages(obj, pages, sg_mask);
+        __i915_gem_object_set_pages(obj, pages, sg_page_sizes);
         return 0;
 #undef GFP
...