Commit 4482d3c9 authored by Rajneesh Bhardwaj, committed by Alex Deucher

drm/ttm: add NUMA node id to the pool

This allows backing a ttm_tt structure with pages from different NUMA
pools.
Tested-by: Graham Sider <graham.sider@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent c1d3f627
drivers/gpu/drm/ttm/ttm_device.c
@@ -213,7 +213,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
 	bdev->funcs = funcs;
 
 	ttm_sys_man_init(bdev);
-	ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
+	ttm_pool_init(&bdev->pool, dev, NUMA_NO_NODE, use_dma_alloc, use_dma32);
 
 	bdev->vma_manager = vma_manager;
 	spin_lock_init(&bdev->lru_lock);
...
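Existing callers keep the old behaviour: ttm_device_init() forwards NUMA_NO_NODE, so device pools stay node-agnostic. With ttm_pool_init() now exported (see below), a driver that wants its pages from one specific node can initialize a standalone pool directly. A minimal sketch; the node id, device pointer, and function name here are illustrative assumptions, not part of the patch:

#include <linux/device.h>
#include <linux/numa.h>
#include <drm/ttm/ttm_pool.h>

/* Hypothetical driver code: bind a standalone pool to NUMA node 1,
 * without coherent DMA allocations and without GFP_DMA32. */
static struct ttm_pool node_pool;

static void example_bind_pool(struct device *dev)
{
	ttm_pool_init(&node_pool, dev, 1 /* nid */, false, false);
}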
drivers/gpu/drm/ttm/ttm_pool.c
@@ -98,7 +98,7 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
 			__GFP_KSWAPD_RECLAIM;
 
 	if (!pool->use_dma_alloc) {
-		p = alloc_pages(gfp_flags, order);
+		p = alloc_pages_node(pool->nid, gfp_flags, order);
 		if (p)
 			p->private = order;
 		return p;
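For reference on the allocator change: alloc_pages_node() with nid == NUMA_NO_NODE falls back to a node near the caller, so unbound pools behave much like the old alloc_pages() call (modulo task mempolicy, which alloc_pages() honours). A sketch of the distinction, illustrative only:

#include <linux/gfp.h>
#include <linux/numa.h>

/* Illustrative only: how the nid parameter steers the allocation. */
static struct page *example_alloc(int nid, gfp_t gfp, unsigned int order)
{
	if (nid == NUMA_NO_NODE)
		return alloc_pages(gfp, order);		/* old path */
	return alloc_pages_node(nid, gfp, order);	/* node-bound */
}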
@@ -292,7 +292,7 @@ static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
 						  enum ttm_caching caching,
 						  unsigned int order)
 {
-	if (pool->use_dma_alloc)
+	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE)
 		return &pool->caching[caching].orders[order];
 
 #ifdef CONFIG_X86
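The selection change keeps node-bound pages out of the shared global pools: pages allocated for a specific node must be recycled through that pool's own per-caching/order types, just as coherent DMA pages already were. The rule, restated as a standalone sketch (hypothetical helper name):

#include <linux/numa.h>
#include <drm/ttm/ttm_pool.h>

/* Sketch: a pool needs private pool types whenever its pages are not
 * interchangeable with everyone else's, i.e. coherent DMA memory or
 * pages pinned to one NUMA node. */
static bool pool_needs_private_types(const struct ttm_pool *pool)
{
	return pool->use_dma_alloc || pool->nid != NUMA_NO_NODE;
}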
@@ -550,29 +550,32 @@ EXPORT_SYMBOL(ttm_pool_free);
  *
  * @pool: the pool to initialize
  * @dev: device for DMA allocations and mappings
+ * @nid: NUMA node to use for allocations
  * @use_dma_alloc: true if coherent DMA alloc should be used
  * @use_dma32: true if GFP_DMA32 should be used
  *
  * Initialize the pool and its pool types.
  */
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-		   bool use_dma_alloc, bool use_dma32)
+		   int nid, bool use_dma_alloc, bool use_dma32)
 {
 	unsigned int i, j;
 
 	WARN_ON(!dev && use_dma_alloc);
 
 	pool->dev = dev;
+	pool->nid = nid;
 	pool->use_dma_alloc = use_dma_alloc;
 	pool->use_dma32 = use_dma32;
 
-	if (use_dma_alloc) {
+	if (use_dma_alloc || nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 			for (j = 0; j < TTM_DIM_ORDER; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
 }
+EXPORT_SYMBOL(ttm_pool_init);
 
 /**
  * ttm_pool_fini - Cleanup a pool
@@ -586,7 +589,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 {
 	unsigned int i, j;
 
-	if (pool->use_dma_alloc) {
+	if (pool->use_dma_alloc || pool->nid != NUMA_NO_NODE) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
 			for (j = 0; j < TTM_DIM_ORDER; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
...
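ttm_pool_fini() gates the per-pool cleanup on the same condition as ttm_pool_init(), so the two must be paired with matching parameters. Putting the pieces together, a driver-owned, node-bound pool lifecycle might look like the sketch below; the helper names are hypothetical and error handling is omitted:

#include <drm/ttm/ttm_pool.h>

/* Hypothetical wrappers around a node-bound pool. */
static void example_pool_create(struct ttm_pool *pool, struct device *dev,
				int nid)
{
	ttm_pool_init(pool, dev, nid, false, false);
}

/* Populate a ttm_tt from the node-bound pool via the existing
 * ttm_pool_alloc() entry point declared in this header. */
static int example_populate(struct ttm_pool *pool, struct ttm_tt *tt,
			    struct ttm_operation_ctx *ctx)
{
	return ttm_pool_alloc(pool, tt, ctx);
}

static void example_pool_destroy(struct ttm_pool *pool)
{
	/* Releases the pages cached in the pool's private types. */
	ttm_pool_fini(pool);
}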
include/drm/ttm/ttm_pool.h
@@ -61,12 +61,14 @@ struct ttm_pool_type {
  * struct ttm_pool - Pool for all caching and orders
  *
  * @dev: the device we allocate pages for
+ * @nid: which numa node to use
  * @use_dma_alloc: if coherent DMA allocations should be used
  * @use_dma32: if GFP_DMA32 should be used
  * @caching: pools for each caching/order
  */
 struct ttm_pool {
 	struct device *dev;
+	int nid;
 
 	bool use_dma_alloc;
 	bool use_dma32;
@@ -81,7 +83,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt);
 void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
-		   bool use_dma_alloc, bool use_dma32);
+		   int nid, bool use_dma_alloc, bool use_dma32);
 void ttm_pool_fini(struct ttm_pool *pool);
 
 int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m);
...