Commit 46e208e7 authored by Jakub Kicinski

Merge branch 'net-page_pool-add-netlink-based-introspection-part1'

Jakub Kicinski says:

====================
net: page_pool: split the page_pool_params into fast and slow

Small refactoring in prep for adding more page pool params
which won't be needed on the fast path.

v1:  https://lore.kernel.org/all/20231024160220.3973311-1-kuba@kernel.org/
RFC: https://lore.kernel.org/all/20230816234303.3786178-1-kuba@kernel.org/
====================

Link: https://lore.kernel.org/r/20231121000048.789613-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3a17ea77 2da0cac1
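
For context on the header change below: struct_group_tagged() from
include/linux/stddef.h wraps a run of struct members in an anonymous union,
so the members stay addressable flat (params->flags) while also forming a
named group with its own standalone type (params->fast of type
struct page_pool_params_fast). A minimal userspace sketch of the behavior
the series relies on; the macro here is a simplified stand-in (the kernel
version also handles attributes) and the member set is illustrative:

  #include <stdio.h>
  #include <string.h>

  /* Simplified stand-in for the kernel's struct_group_tagged() */
  #define struct_group_tagged(TAG, NAME, MEMBERS...)    \
          union {                                       \
                  struct { MEMBERS };                   \
                  struct TAG { MEMBERS } NAME;          \
          }

  struct params {
          struct_group_tagged(params_fast, fast,
                  unsigned int flags;
                  unsigned int order;
          );
          struct_group_tagged(params_slow, slow,
                  void *init_arg;
          );
  };

  int main(void)
  {
          struct params p = { 0 };
          struct params_fast copy;

          p.flags = 1;    /* flat access still compiles... */
          p.order = 3;
          /* ...and the group can be copied as one typed object */
          memcpy(&copy, &p.fast, sizeof(copy));
          printf("flags=%u order=%u\n", copy.flags, copy.order);
          return 0;
  }

This builds with gcc/clang; anonymous structs/unions and the named variadic
macro are GNU C extensions, which the kernel uses throughout.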
@@ -54,18 +54,22 @@ struct pp_alloc_cache {
  * @offset:	DMA sync address offset for PP_FLAG_DMA_SYNC_DEV
  */
 struct page_pool_params {
-	unsigned int	flags;
-	unsigned int	order;
-	unsigned int	pool_size;
-	int		nid;
-	struct device	*dev;
-	struct napi_struct *napi;
-	enum dma_data_direction dma_dir;
-	unsigned int	max_len;
-	unsigned int	offset;
+	struct_group_tagged(page_pool_params_fast, fast,
+		unsigned int	flags;
+		unsigned int	order;
+		unsigned int	pool_size;
+		int		nid;
+		struct device	*dev;
+		struct napi_struct *napi;
+		enum dma_data_direction dma_dir;
+		unsigned int	max_len;
+		unsigned int	offset;
+	);
+	struct_group_tagged(page_pool_params_slow, slow,
 /* private: used by test code only */
-	void (*init_callback)(struct page *page, void *arg);
-	void *init_arg;
+		void (*init_callback)(struct page *page, void *arg);
+		void *init_arg;
+	);
 };
#ifdef CONFIG_PAGE_POOL_STATS
@@ -119,7 +123,9 @@ struct page_pool_stats {
 #endif
 
 struct page_pool {
-	struct page_pool_params p;
+	struct page_pool_params_fast p;
+
+	bool has_init_callback;
 
 	long frag_users;
 	struct page *frag_page;
@@ -178,6 +184,9 @@ struct page_pool {
 	refcount_t user_cnt;
 	u64 destroy_cnt;
+
+	/* Slow/Control-path information follows */
+	struct page_pool_params_slow slow;
 };
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
......
@@ -173,7 +173,8 @@ static int page_pool_init(struct page_pool *pool,
 {
 	unsigned int ring_qsize = 1024; /* Default */
 
-	memcpy(&pool->p, params, sizeof(pool->p));
+	memcpy(&pool->p, &params->fast, sizeof(pool->p));
+	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));
 
 	/* Validate only known flags were used */
 	if (pool->p.flags & ~(PP_FLAG_ALL))
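
The two-memcpy split above works because struct_group_tagged() generates the
standalone struct page_pool_params_fast from the same member list as the
fast group, so source and destination layouts match byte for byte, and the
fast group sits at the start of struct page_pool_params. Hypothetical
compile-time checks (not in the patch) that would state that invariant:

  static_assert(offsetof(struct page_pool_params, fast) == 0);
  static_assert(sizeof(((struct page_pool_params *)0)->fast) ==
                sizeof(struct page_pool_params_fast));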
@@ -211,6 +212,8 @@ static int page_pool_init(struct page_pool *pool,
 	 */
 	}
 
+	pool->has_init_callback = !!pool->slow.init_callback;
+
 #ifdef CONFIG_PAGE_POOL_STATS
 	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
 	if (!pool->recycle_stats)
@@ -388,8 +391,8 @@ static void page_pool_set_pp_info(struct page_pool *pool,
 	 * the overhead is negligible.
 	 */
 	page_pool_fragment_page(page, 1);
-	if (pool->p.init_callback)
-		pool->p.init_callback(page, pool->p.init_arg);
+	if (pool->has_init_callback)
+		pool->slow.init_callback(page, pool->slow.init_arg);
 }
static void page_pool_clear_pp_info(struct page *page)
......
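
As a usage illustration, a hypothetical test-style caller after this series
(the helper names and counter are made up; page_pool_create() and the params
fields are real): the callback is registered through the same flat
initializers as before, lands in params.slow, and at alloc time
page_pool_set_pp_info() dereferences pool->slow only after the cheap
pool->has_init_callback test kept in the fast struct.

  /* Hypothetical test helper: count pages the pool initialized */
  static void pp_test_init(struct page *page, void *arg)
  {
          atomic_t *pages_seen = arg;

          atomic_inc(pages_seen);
  }

  static atomic_t pages_seen = ATOMIC_INIT(0);

  static struct page_pool *pp_test_create(void)
  {
          struct page_pool_params params = {
                  .order          = 0,
                  .pool_size      = 256,
                  .nid            = NUMA_NO_NODE,
                  /* flat init still works; these land in params.slow */
                  .init_callback  = pp_test_init,
                  .init_arg       = &pages_seen,
          };

          return page_pool_create(&params);
  }

Caching only the bool on the hot path presumably keeps the common
no-callback case from ever touching the slow group's cache line.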