Commit 2976db80 authored by Alexander Duyck, committed by Linus Torvalds

mm: rename __page_frag functions to __page_frag_cache, drop order from drain

This patch does two things.

First, it renames the __page_frag prefixed functions to __page_frag_cache so that it is clear that we are draining or refilling the cache, not the frags themselves.

Second, it drops the order parameter from __page_frag_cache_drain, since we no longer need to pass it: the backing page is either order 0 or a compound page, so the order can be recovered from the page itself via compound_order().

Link: http://lkml.kernel.org/r/20170104023954.13451.5678.stgit@localhost.localdomain
Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8c2dd3e4
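
Before the diffs, it is worth spelling out why the count-based drain can do without an explicit order: __page_frag_cache_drain() subtracts the caller's outstanding reference bias from the page's refcount and, only when that drops the count to zero, reads the order back from the (possibly compound) page and frees it. Below is a minimal userspace sketch of that sub-and-test pattern; struct frag_page and drain_cache() are hypothetical stand-ins for struct page and the kernel helpers, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct page: a buffer plus a reference count. */
struct frag_page {
	int refcount;        /* analogous to page_ref_count() */
	unsigned int order;  /* in the kernel, recoverable via compound_order() */
	void *mem;
};

/* Mirrors the shape of __page_frag_cache_drain(): drop 'count' references
 * in one step and free the backing memory only when the count hits zero. */
static void drain_cache(struct frag_page *pg, int count)
{
	pg->refcount -= count;          /* kernel: page_ref_sub_and_test() */
	if (pg->refcount == 0) {
		/* The order would be read from the page here, which is why
		 * callers never need to pass it in. */
		free(pg->mem);
		pg->mem = NULL;
	}
}

int main(void)
{
	struct frag_page pg = { .refcount = 3, .order = 0, .mem = malloc(4096) };

	drain_cache(&pg, 2);  /* two outstanding frag references, one call */
	drain_cache(&pg, 1);  /* last reference: buffer actually freed */
	printf("freed: %s\n", pg.mem ? "no" : "yes");
	return 0;
}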
@@ -3962,8 +3962,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
 				     PAGE_SIZE,
 				     DMA_FROM_DEVICE,
 				     DMA_ATTR_SKIP_CPU_SYNC);
-		__page_frag_drain(buffer_info->page, 0,
-				  buffer_info->pagecnt_bias);
+		__page_frag_cache_drain(buffer_info->page,
+					buffer_info->pagecnt_bias);
 
 		buffer_info->page = NULL;
 	}
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 		dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
 				     PAGE_SIZE, DMA_FROM_DEVICE,
 				     DMA_ATTR_SKIP_CPU_SYNC);
-		__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+		__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
 	}
 
 	/* clear contents of rx_buffer */
...
@@ -499,8 +499,7 @@ extern void free_hot_cold_page(struct page *page, bool cold);
 extern void free_hot_cold_page_list(struct list_head *list, bool cold);
 
 struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
-			      unsigned int count);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
 extern void *page_frag_alloc(struct page_frag_cache *nc,
 			     unsigned int fragsz, gfp_t gfp_mask);
 extern void page_frag_free(void *addr);
...
@@ -3896,8 +3896,8 @@ EXPORT_SYMBOL(free_pages);
  * drivers to provide a backing region of memory for use as either an
  * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
  */
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
-				       gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+					     gfp_t gfp_mask)
 {
 	struct page *page = NULL;
 	gfp_t gfp = gfp_mask;
@@ -3917,19 +3917,20 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
 	return page;
 }
 
-void __page_frag_drain(struct page *page, unsigned int order,
-		       unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
 {
 	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
 
 	if (page_ref_sub_and_test(page, count)) {
+		unsigned int order = compound_order(page);
+
 		if (order == 0)
 			free_hot_cold_page(page, false);
 		else
 			__free_pages_ok(page, order);
 	}
 }
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
 
 void *page_frag_alloc(struct page_frag_cache *nc,
 		      unsigned int fragsz, gfp_t gfp_mask)
@@ -3940,7 +3941,7 @@ void *page_frag_alloc(struct page_frag_cache *nc,
 	if (unlikely(!nc->va)) {
 refill:
-		page = __page_frag_refill(nc, gfp_mask);
+		page = __page_frag_cache_refill(nc, gfp_mask);
 		if (!page)
 			return NULL;
...
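
For callers, the net effect is the call-site change visible in the igb hunks above; illustratively, mirroring the diff in igb_fetch_rx_buffer:

	/* Before: the caller had to pass the order alongside the bias. */
	__page_frag_drain(page, 0, rx_buffer->pagecnt_bias);

	/* After: only the bias; the order is derived via compound_order(). */
	__page_frag_cache_drain(page, rx_buffer->pagecnt_bias);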