Commit cb5f1a52 authored by Andrey Grodzovsky, committed by Alex Deucher

drm/ttm: Allow page allocations w/o triggering OOM..

This allows drivers to choose to avoid OOM invocation and handle
page allocation failures instead.

v2:
Remove extra new lines.
Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Roger He <Hongbo.He@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 7b158d16
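
For context, a driver opts in by setting no_retry on its struct ttm_bo_device after ttm_bo_device_init() and then handling -ENOMEM from the allocation paths itself. The sketch below is illustrative only and is not part of this patch: all mydrv_* identifiers and MYDRV_FILE_PAGE_OFFSET are hypothetical, while ttm_bo_device_init() and the new no_retry field come from the TTM API of this era.

/*
 * Illustrative sketch, not part of this patch: how a driver might opt
 * in to OOM-free allocation failures. All mydrv_* identifiers and
 * MYDRV_FILE_PAGE_OFFSET are hypothetical.
 */
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

static int mydrv_ttm_init(struct mydrv_device *mdev)
{
	int r;

	r = ttm_bo_device_init(&mdev->bdev, mdev->bo_global,
			       &mydrv_bo_driver, mdev->dev_mapping,
			       MYDRV_FILE_PAGE_OFFSET, mdev->need_dma32);
	if (r)
		return r;

	/*
	 * Opt in: TTM page allocations get __GFP_RETRY_MAYFAIL, so they
	 * return -ENOMEM to the driver instead of invoking the OOM killer.
	 */
	mdev->bdev.no_retry = true;

	return 0;
}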
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -235,6 +235,9 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 	if (bdev->need_dma32)
 		page_flags |= TTM_PAGE_FLAG_DMA32;
 
+	if (bdev->no_retry)
+		page_flags |= TTM_PAGE_FLAG_NO_RETRY;
+
 	switch (bo->type) {
 	case ttm_bo_type_device:
 		if (zero_alloc)
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -741,6 +741,9 @@ static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
 		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 			gfp_flags |= __GFP_ZERO;
 
+		if (ttm_flags & TTM_PAGE_FLAG_NO_RETRY)
+			gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 		/* ttm_alloc_new_pages doesn't reference pool so we can run
 		 * multiple requests in parallel.
 		 **/
@@ -893,6 +896,9 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
 		gfp_flags |= __GFP_ZERO;
 
+	if (flags & TTM_PAGE_FLAG_NO_RETRY)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 	if (flags & TTM_PAGE_FLAG_DMA32)
 		gfp_flags |= GFP_DMA32;
 	else
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -920,6 +920,9 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge)
 		gfp_flags &= ~__GFP_COMP;
 	}
 
+	if (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY)
+		gfp_flags |= __GFP_RETRY_MAYFAIL;
+
 	return gfp_flags;
 }
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -301,7 +301,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 	swap_space = swap_storage->f_mapping;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
-		from_page = shmem_read_mapping_page(swap_space, i);
+		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+		from_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
+
 		if (IS_ERR(from_page)) {
 			ret = PTR_ERR(from_page);
 			goto out_err;
@@ -350,10 +354,15 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 	swap_space = swap_storage->f_mapping;
 
 	for (i = 0; i < ttm->num_pages; ++i) {
+		gfp_t gfp_mask = mapping_gfp_mask(swap_space);
+
+		gfp_mask |= (ttm->page_flags & TTM_PAGE_FLAG_NO_RETRY ? __GFP_RETRY_MAYFAIL : 0);
+
 		from_page = ttm->pages[i];
 		if (unlikely(from_page == NULL))
 			continue;
-		to_page = shmem_read_mapping_page(swap_space, i);
+
+		to_page = shmem_read_mapping_page_gfp(swap_space, i, gfp_mask);
 		if (IS_ERR(to_page)) {
 			ret = PTR_ERR(to_page);
 			goto out_err;
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -86,6 +86,7 @@ struct ttm_backend_func {
 #define TTM_PAGE_FLAG_ZERO_ALLOC      (1 << 6)
 #define TTM_PAGE_FLAG_DMA32           (1 << 7)
 #define TTM_PAGE_FLAG_SG              (1 << 8)
+#define TTM_PAGE_FLAG_NO_RETRY        (1 << 9)
 
 enum ttm_caching_state {
 	tt_uncached,
@@ -556,6 +557,7 @@ struct ttm_bo_global {
  * @dev_mapping: A pointer to the struct address_space representing the
  * device address space.
  * @wq: Work queue structure for the delayed delete workqueue.
+ * @no_retry: Don't retry allocation if it fails
  *
  */
@@ -592,6 +594,8 @@ struct ttm_bo_device {
 	struct delayed_work wq;
 
 	bool need_dma32;
+
+	bool no_retry;
 };
 
 /**
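
Once no_retry is set, paths such as ttm_bo_validate() can surface -ENOMEM where they previously could end up in the OOM killer. A minimal, hypothetical call-site sketch follows; ttm_bo_validate() and struct ttm_operation_ctx are real TTM API of this era, while mydrv_pin_bo() and the recovery policy are illustrative, not something this patch prescribes.

/* Sketch only: handling the now-possible -ENOMEM at a call site. */
static int mydrv_pin_bo(struct ttm_buffer_object *bo,
			struct ttm_placement *placement)
{
	struct ttm_operation_ctx ctx = { .interruptible = true };
	int r;

	r = ttm_bo_validate(bo, placement, &ctx);
	if (r == -ENOMEM) {
		/*
		 * The allocation failed without waking the OOM killer;
		 * the driver can evict, retry with a smaller placement,
		 * or report the failure to userspace.
		 */
		return r;
	}
	return r;
}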