Commit 8c2dd3e4 authored by Alexander Duyck's avatar Alexander Duyck Committed by Linus Torvalds

mm: rename __alloc_page_frag to page_frag_alloc and __free_page_frag to page_frag_free

Patch series "Page fragment updates", v4.

This patch series takes care of a few cleanups for the page fragments
API.

First we do some renames so that things are much more consistent.  We
move the page_frag_ portion of the name to the front of the function
names.  Secondly we split out the cache specific functions from the
other page fragment functions by adding the word "cache" to the name.

Finally I added a bit of documentation that will hopefully help to
explain some of this.  I plan to revisit this later as we get things
more ironed out in the near future with the changes planned for the DMA
setup to support eXpress Data Path.

This patch (of 3):

This patch renames the page frag functions to be more consistent with
other APIs.  Specifically we place the name page_frag first in the name
and then have either an alloc or free call name that we append as the
suffix.  This makes it a bit clearer in terms of naming.

In addition we drop the leading double underscores since we are
technically no longer a backing interface and instead the front end that
is called from the networking APIs.

Link: http://lkml.kernel.org/r/20170104023854.13451.67390.stgit@localhost.localdomain
Signed-off-by: default avatarAlexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent b4536f0c
...@@ -501,9 +501,9 @@ extern void free_hot_cold_page_list(struct list_head *list, bool cold); ...@@ -501,9 +501,9 @@ extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache; struct page_frag_cache;
extern void __page_frag_drain(struct page *page, unsigned int order, extern void __page_frag_drain(struct page *page, unsigned int order,
unsigned int count); unsigned int count);
extern void *__alloc_page_frag(struct page_frag_cache *nc, extern void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask); unsigned int fragsz, gfp_t gfp_mask);
extern void __free_page_frag(void *addr); extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0) #define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0) #define free_page(addr) free_pages((addr), 0)
......
...@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, ...@@ -2480,7 +2480,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
static inline void skb_free_frag(void *addr) static inline void skb_free_frag(void *addr)
{ {
__free_page_frag(addr); page_frag_free(addr);
} }
void *napi_alloc_frag(unsigned int fragsz); void *napi_alloc_frag(unsigned int fragsz);
......
...@@ -3931,8 +3931,8 @@ void __page_frag_drain(struct page *page, unsigned int order, ...@@ -3931,8 +3931,8 @@ void __page_frag_drain(struct page *page, unsigned int order,
} }
EXPORT_SYMBOL(__page_frag_drain); EXPORT_SYMBOL(__page_frag_drain);
void *__alloc_page_frag(struct page_frag_cache *nc, void *page_frag_alloc(struct page_frag_cache *nc,
unsigned int fragsz, gfp_t gfp_mask) unsigned int fragsz, gfp_t gfp_mask)
{ {
unsigned int size = PAGE_SIZE; unsigned int size = PAGE_SIZE;
struct page *page; struct page *page;
...@@ -3983,19 +3983,19 @@ void *__alloc_page_frag(struct page_frag_cache *nc, ...@@ -3983,19 +3983,19 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
return nc->va + offset; return nc->va + offset;
} }
EXPORT_SYMBOL(__alloc_page_frag); EXPORT_SYMBOL(page_frag_alloc);
/* /*
* Frees a page fragment allocated out of either a compound or order 0 page. * Frees a page fragment allocated out of either a compound or order 0 page.
*/ */
void __free_page_frag(void *addr) void page_frag_free(void *addr)
{ {
struct page *page = virt_to_head_page(addr); struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page))) if (unlikely(put_page_testzero(page)))
__free_pages_ok(page, compound_order(page)); __free_pages_ok(page, compound_order(page));
} }
EXPORT_SYMBOL(__free_page_frag); EXPORT_SYMBOL(page_frag_free);
static void *make_alloc_exact(unsigned long addr, unsigned int order, static void *make_alloc_exact(unsigned long addr, unsigned int order,
size_t size) size_t size)
......
...@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) ...@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
local_irq_save(flags); local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache); nc = this_cpu_ptr(&netdev_alloc_cache);
data = __alloc_page_frag(nc, fragsz, gfp_mask); data = page_frag_alloc(nc, fragsz, gfp_mask);
local_irq_restore(flags); local_irq_restore(flags);
return data; return data;
} }
...@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) ...@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{ {
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
return __alloc_page_frag(&nc->page, fragsz, gfp_mask); return page_frag_alloc(&nc->page, fragsz, gfp_mask);
} }
void *napi_alloc_frag(unsigned int fragsz) void *napi_alloc_frag(unsigned int fragsz)
...@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, ...@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
local_irq_save(flags); local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache); nc = this_cpu_ptr(&netdev_alloc_cache);
data = __alloc_page_frag(nc, len, gfp_mask); data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc; pfmemalloc = nc->pfmemalloc;
local_irq_restore(flags); local_irq_restore(flags);
...@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, ...@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
if (sk_memalloc_socks()) if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC; gfp_mask |= __GFP_MEMALLOC;
data = __alloc_page_frag(&nc->page, len, gfp_mask); data = page_frag_alloc(&nc->page, len, gfp_mask);
if (unlikely(!data)) if (unlikely(!data))
return NULL; return NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment