Commit 6ed4e2e6 authored by Christian König, committed by Alex Deucher

drm/ttm: add transparent huge page support for wc or uc allocations v2

Add a new huge page pool and try to allocate from it when it makes sense.

v2: avoid compound pages for now
Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 8593e9b8
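
In outline, the patch adds two pools holding order-HPAGE_PMD_ORDER chunks (wc_pool_huge and uc_pool_huge), serves large requests from them first, and falls back to single pages for the remainder. Below is a minimal sketch of that strategy only, not the patch itself; grab_huge() and grab_single() are hypothetical stand-ins for the patched ttm_page_pool_get_pages() calls and are not kernel APIs.

/*
 * Sketch only (kernel context assumed): the allocation strategy this
 * patch adds. grab_huge() and grab_single() are hypothetical helpers.
 */
static int fill_pages(struct page **pages, unsigned npages)
{
        unsigned count = 0;

        /* Serve order-HPAGE_PMD_ORDER chunks while a whole one still fits. */
        while (npages - count >= HPAGE_PMD_NR && grab_huge(&pages[count]))
                count += HPAGE_PMD_NR;

        /* Fall back to single pages for the remainder. */
        for (; count < npages; ++count)
                if (!grab_single(&pages[count]))
                        return -ENOMEM;

        return 0;
}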
@@ -95,7 +95,7 @@ struct ttm_pool_opts {
 	unsigned	small;
 };

-#define NUM_POOLS 4
+#define NUM_POOLS 6

 /**
  * struct ttm_pool_manager - Holds memory pools for fst allocation
@@ -122,6 +122,8 @@ struct ttm_pool_manager {
 			struct ttm_page_pool	uc_pool;
 			struct ttm_page_pool	wc_pool_dma32;
 			struct ttm_page_pool	uc_pool_dma32;
+			struct ttm_page_pool	wc_pool_huge;
+			struct ttm_page_pool	uc_pool_huge;
 		} ;
 	};
 };
@@ -256,8 +258,8 @@ static int set_pages_array_uc(struct page **pages, int addrinarray)
 /**
  * Select the right pool or requested caching state and ttm flags. */
-static struct ttm_page_pool *ttm_get_pool(int flags,
-		enum ttm_caching_state cstate)
+static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
+					  enum ttm_caching_state cstate)
 {
 	int pool_index;

@@ -269,9 +271,15 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
 	else
 		pool_index = 0x1;

-	if (flags & TTM_PAGE_FLAG_DMA32)
+	if (flags & TTM_PAGE_FLAG_DMA32) {
+		if (huge)
+			return NULL;
 		pool_index |= 0x2;

+	} else if (huge) {
+		pool_index |= 0x4;
+	}
+
 	return &_manager->pools[pool_index];
 }
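
The pool index is a small bitfield: bit 0 selects uncached over write-combined, bit 1 the DMA32 variant, and the new bit 2 the huge variant. Huge DMA32 pools do not exist (NUM_POOLS grows to 6, not 8), so that combination returns NULL. The same encoding as a standalone sketch, with illustrative names (the patch itself uses the raw 0x1/0x2/0x4 literals):

#include <stdbool.h>

/* Illustrative names; not part of the patch. */
enum { POOL_UC = 0x1, POOL_DMA32 = 0x2, POOL_HUGE = 0x4 };

static int pool_index(bool uc, bool dma32, bool huge)
{
        int idx = uc ? POOL_UC : 0;

        if (dma32) {
                if (huge)
                        return -1;      /* no huge DMA32 pools: NUM_POOLS == 6 */
                idx |= POOL_DMA32;
        } else if (huge) {
                idx |= POOL_HUGE;
        }
        return idx;     /* 0..5, matching the pools[] union order */
}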
@@ -494,12 +502,14 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
  * pages returned in pages array.
  */
 static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
-		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
+			       int ttm_flags, enum ttm_caching_state cstate,
+			       unsigned count, unsigned order)
 {
 	struct page **caching_array;
 	struct page *p;
 	int r = 0;
-	unsigned i, cpages;
+	unsigned i, j, cpages;
+	unsigned npages = 1 << order;
 	unsigned max_cpages = min(count,
 			(unsigned)(PAGE_SIZE/sizeof(struct page *)));
@@ -512,7 +522,7 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 	}

 	for (i = 0, cpages = 0; i < count; ++i) {
-		p = alloc_page(gfp_flags);
+		p = alloc_pages(gfp_flags, order);

 		if (!p) {
 			pr_err("Unable to get page %u\n", i);
@@ -531,14 +541,18 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 			goto out;
 		}

+		list_add(&p->lru, pages);
+
 #ifdef CONFIG_HIGHMEM
 		/* gfp flags of highmem page should never be dma32 so we
 		 * we should be fine in such case
 		 */
-		if (!PageHighMem(p))
+		if (PageHighMem(p))
+			continue;
+
 #endif
-		{
-			caching_array[cpages++] = p;
+
+		for (j = 0; j < npages; ++j) {
+			caching_array[cpages++] = p++;
 			if (cpages == max_cpages) {

 				r = ttm_set_pages_caching(caching_array,
@@ -552,8 +566,6 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
 				cpages = 0;
 			}
 		}
-
-		list_add(&p->lru, pages);
 	}

 	if (cpages) {
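
Because v2 avoids compound pages, an order-N allocation is simply 1 << N contiguous struct pages, and every constituent page gets its caching attribute changed individually; note also that list_add() now runs before the caching loop, since p is advanced past the head page by p++. A kernel-context sketch of that per-page walk, with a hypothetical set_wc() standing in for the batched ttm_set_pages_caching() call:

/* Sketch: per-page caching change over a non-compound order-N chunk. */
static int make_wc(struct page *first, unsigned order)
{
        unsigned npages = 1 << order;
        unsigned j;

        for (j = 0; j < npages; ++j) {
                /* struct pages of one allocation are contiguous in the memmap */
                int r = set_wc(first + j);      /* hypothetical helper */
                if (r)
                        return r;
        }
        return 0;
}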
@@ -573,9 +585,9 @@ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
  * Fill the given pool if there aren't enough pages and the requested number of
  * pages is small.
  */
-static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
-		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
-		unsigned long *irq_flags)
+static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
+				      enum ttm_caching_state cstate,
+				      unsigned count, unsigned long *irq_flags)
 {
 	struct page *p;
 	int r;
@@ -605,7 +617,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 		INIT_LIST_HEAD(&new_pages);
 		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
-				cstate, alloc_size);
+					cstate, alloc_size, 0);
 		spin_lock_irqsave(&pool->lock, *irq_flags);

 		if (!r) {
@@ -635,7 +647,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 				   struct list_head *pages,
 				   int ttm_flags,
 				   enum ttm_caching_state cstate,
-				   unsigned count)
+				   unsigned count, unsigned order)
 {
 	unsigned long irq_flags;
 	struct list_head *p;
@@ -643,7 +655,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 	int r = 0;

 	spin_lock_irqsave(&pool->lock, irq_flags);
-	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);
+	if (!order)
+		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
+					  &irq_flags);

 	if (count >= pool->npages) {
 		/* take all pages from the pool */
@@ -698,7 +712,7 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		 * multiple requests in parallel.
 		 **/
 		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
-					count);
+					count, order);
 	}

 	return r;
@@ -708,8 +722,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 			  enum ttm_caching_state cstate)
 {
+	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 	unsigned long irq_flags;
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
 	unsigned i;

 	if (pool == NULL) {
@@ -737,8 +752,48 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 		return;
 	}

+	i = 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (huge) {
+		unsigned max_size, n2free;
+
+		spin_lock_irqsave(&huge->lock, irq_flags);
+		while (i < npages) {
+			struct page *p = pages[i];
+			unsigned j;
+
+			if (!p)
+				break;
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				if (p++ != pages[i + j])
+					break;
+
+			if (j != HPAGE_PMD_NR)
+				break;
+
+			list_add_tail(&pages[i]->lru, &huge->list);
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				pages[i++] = NULL;
+			huge->npages++;
+		}
+
+		/* Check that we don't go over the pool limit */
+		max_size = _manager->options.max_size;
+		max_size /= HPAGE_PMD_NR;
+		if (huge->npages > max_size)
+			n2free = huge->npages - max_size;
+		else
+			n2free = 0;
+		spin_unlock_irqrestore(&huge->lock, irq_flags);
+		if (n2free)
+			ttm_page_pool_free(huge, n2free, false);
+	}
+#endif
+
 	spin_lock_irqsave(&pool->lock, irq_flags);
-	for (i = 0; i < npages; i++) {
+	while (i < npages) {
 		if (pages[i]) {
 			if (page_count(pages[i]) != 1)
 				pr_err("Erroneous page count. Leaking pages.\n");
@@ -746,6 +801,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 			pages[i] = NULL;
 			pool->npages++;
 		}
+		++i;
 	}

 	/* Check that we don't go over the pool limit */
 	npages = 0;
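
On the release side, a chunk may go back to the huge pool only if the next HPAGE_PMD_NR array entries are exactly the consecutive struct pages of one allocation; the first mismatch stops huge reclaim, and the leftover index i (hence the for-to-while change and the ++i fix above) continues into the single-page loop. The contiguity test, isolated as a kernel-context sketch:

/* Sketch: does pages[i .. i + nr - 1] form one contiguous run? */
static bool is_contiguous_run(struct page **pages, unsigned i, unsigned nr)
{
        struct page *p = pages[i];
        unsigned j;

        for (j = 0; j < nr; ++j)
                if (p++ != pages[i + j])
                        return false;   /* run broken, not a huge chunk */
        return true;
}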
@@ -768,7 +824,8 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 			 enum ttm_caching_state cstate)
 {
-	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
+	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
+	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
 	struct list_head plist;
 	struct page *p = NULL;
 	unsigned count;
@@ -821,11 +878,28 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 		return 0;
 	}

-	/* First we take pages from the pool */
+	count = 0;
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	if (huge && npages >= HPAGE_PMD_NR) {
+		INIT_LIST_HEAD(&plist);
+		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
+					npages / HPAGE_PMD_NR,
+					HPAGE_PMD_ORDER);
+
+		list_for_each_entry(p, &plist, lru) {
+			unsigned j;
+
+			for (j = 0; j < HPAGE_PMD_NR; ++j)
+				pages[count++] = &p[j];
+		}
+	}
+#endif
+
 	INIT_LIST_HEAD(&plist);
-	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
+	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
+				    npages - count, 0);

-	count = 0;
 	list_for_each_entry(p, &plist, lru)
 		pages[count++] = p;
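
Each entry on the huge pool's list is the head struct page of one order-HPAGE_PMD_ORDER chunk (512 pages on x86-64 with 4K pages), while callers of ttm_get_pages() expect one pointer per 4K page, so every chunk is fanned out into HPAGE_PMD_NR consecutive array slots. The fan-out, isolated as a kernel-context sketch:

/* Sketch: expand one huge chunk into per-page pointers. */
static unsigned expand_chunk(struct page **pages, unsigned count,
                             struct page *chunk, unsigned nr)
{
        unsigned j;

        for (j = 0; j < nr; ++j)
                pages[count++] = &chunk[j];     /* same as chunk + j */
        return count;   /* new fill level of the pages array */
}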
@@ -872,6 +946,14 @@ int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
 				  GFP_USER | GFP_DMA32, "uc dma");

+	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
+				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
+				  "wc huge");
+
+	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
+				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP)
+				  , "uc huge");
+
 	_manager->options.max_size = max_pages;
 	_manager->options.small = SMALL_ALLOCATION;
 	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
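
The huge pools allocate with GFP_TRANSHUGE minus two bits: __GFP_COMP, because v2 deliberately avoids compound pages, and __GFP_MOVABLE, since these pages are about to be pinned for device access (my reading; the commit message does not spell this out). The resulting mask as a kernel-context sketch:

/* Sketch: the gfp mask both huge pools are initialized with above. */
static const gfp_t huge_pool_gfp = GFP_TRANSHUGE &
                                   ~(__GFP_MOVABLE |    /* will be pinned for DMA */
                                     __GFP_COMP);       /* v2: no compound pages */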
@@ -1041,12 +1123,12 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
 		seq_printf(m, "No pool allocator running.\n");
 		return 0;
 	}
-	seq_printf(m, "%6s %12s %13s %8s\n",
+	seq_printf(m, "%7s %12s %13s %8s\n",
 			h[0], h[1], h[2], h[3]);
 	for (i = 0; i < NUM_POOLS; ++i) {
 		p = &_manager->pools[i];
-		seq_printf(m, "%6s %12ld %13ld %8d\n",
+		seq_printf(m, "%7s %12ld %13ld %8d\n",
 				p->name, p->nrefills,
 				p->nfrees, p->npages);
 	}