Commit 8c48eea3 authored by Jakub Kicinski

page_pool: allow caching from safely localized NAPI

Recent patches to mlx5 mentioned a regression when moving from a
driver-local page pool to using only the generic page pool code.
Page pool has two recycling paths: (1) a direct one, which runs in
safe NAPI context (basically the consumer context, so producing
can be lockless); and (2) via a ptr_ring, which takes a spin
lock because the freeing can happen from any CPU; producer
and consumer may run concurrently.
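
For illustration only (not part of the patch), the two paths map onto the
allow_direct argument of page_pool_put_full_page(), which the diff below
also touches; the helper name and its bool parameter here are made up:

  #include <net/page_pool.h>

  /* Illustrative sketch: choosing between the two recycling paths. */
  static void example_recycle(struct page_pool *pool, struct page *page,
                              bool in_pool_napi_context)
  {
          /* true  -> (1) direct path: safe NAPI/consumer context, the page
           *          goes into the lockless per-pool cache.
           * false -> (2) ptr_ring path: any other context, protected by a
           *          spin lock against concurrent producers.
           */
          page_pool_put_full_page(pool, page, in_pool_napi_context);
  }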

Since the page pool code was added, Eric introduced a revised version
of deferred skb freeing. TCP skbs are now usually returned to the CPU
which allocated them, and freed in softirq context. This places the
freeing (producing pages back into the pool) enticingly close to
the allocation (the consumer).

If we can prove that we're freeing in the same softirq context in which
the consumer NAPI will run, then lockless use of the cache is perfectly
fine and there is no need for the lock.

Let drivers link the page pool to a NAPI instance. If that NAPI instance
is scheduled on the same CPU on which we're freeing, place the pages
in the direct cache.
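
A minimal sketch of the driver-side wiring (example_create_pool is a
hypothetical helper; the .napi member is what this patch adds, see the
documentation hunk further down):

  #include <linux/dma-mapping.h>
  #include <linux/netdevice.h>
  #include <linux/numa.h>
  #include <net/page_pool.h>

  /* Create a pool whose pages are consumed only by @napi, so frees that
   * happen on @napi's CPU while it is scheduled can recycle directly
   * into the lockless cache.
   */
  static struct page_pool *example_create_pool(struct device *dev,
                                               struct napi_struct *napi,
                                               unsigned int pool_size)
  {
          struct page_pool_params pp_params = {
                  .order          = 0,
                  .pool_size      = pool_size,
                  .nid            = NUMA_NO_NODE,
                  .dev            = dev,
                  .napi           = napi, /* only if this NAPI is the sole consumer */
                  .dma_dir        = DMA_FROM_DEVICE,
          };

          return page_pool_create(&pp_params);
  }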

With that and a patched bnxt (XDP enabled to engage the page pool, sigh,
bnxt really needs page pool work :() I see a 2.6% perf boost with
a TCP stream test (app on a different physical core than the softirq).

The CPU use of relevant functions decreases as expected:

  page_pool_refill_alloc_cache   1.17% -> 0%
  _raw_spin_lock                 2.41% -> 0.98%

Only consider the lockless path to be safe when NAPI is scheduled
- in practice this should cover the majority, if not all, of steady-state
workloads. It's usually the NAPI kicking in that causes the skb flush.

The main case we'll miss out on is when the application runs on the same
CPU as NAPI. In that case we don't use the deferred skb free path.
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Dragos Tatulea <dtatulea@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent b07a2d97
@@ -165,6 +165,7 @@ Registration
     pp_params.pool_size = DESC_NUM;
     pp_params.nid = NUMA_NO_NODE;
     pp_params.dev = priv->dev;
+    pp_params.napi = napi; /* only if locking is tied to NAPI */
     pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
     page_pool = page_pool_create(&pp_params);
......
@@ -360,8 +360,11 @@ struct napi_struct {
         unsigned long gro_bitmask;
         int (*poll)(struct napi_struct *, int);
 #ifdef CONFIG_NETPOLL
+        /* CPU actively polling if netpoll is configured */
         int poll_owner;
 #endif
+        /* CPU on which NAPI has been scheduled for processing */
+        int list_owner;
         struct net_device *dev;
         struct gro_list gro_hash[GRO_HASH_BUCKETS];
         struct sk_buff *skb;
......
@@ -3386,6 +3386,18 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
         __skb_frag_ref(&skb_shinfo(skb)->frags[f]);
 }
 
+static inline void
+napi_frag_unref(skb_frag_t *frag, bool recycle, bool napi_safe)
+{
+        struct page *page = skb_frag_page(frag);
+
+#ifdef CONFIG_PAGE_POOL
+        if (recycle && page_pool_return_skb_page(page, napi_safe))
+                return;
+#endif
+        put_page(page);
+}
+
 /**
  * __skb_frag_unref - release a reference on a paged fragment.
  * @frag: the paged fragment
@@ -3396,13 +3408,7 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f)
  */
 static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle)
 {
-        struct page *page = skb_frag_page(frag);
-
-#ifdef CONFIG_PAGE_POOL
-        if (recycle && page_pool_return_skb_page(page))
-                return;
-#endif
-        put_page(page);
+        napi_frag_unref(frag, recycle, false);
 }
 
 /**
......
@@ -77,6 +77,7 @@ struct page_pool_params {
         unsigned int pool_size;
         int nid; /* Numa node id to allocate from pages from */
         struct device *dev; /* device, for DMA pre-mapping purposes */
+        struct napi_struct *napi; /* Sole consumer of pages, otherwise NULL */
         enum dma_data_direction dma_dir; /* DMA mapping direction */
         unsigned int max_len; /* max DMA sync memory size */
         unsigned int offset; /* DMA addr offset */
@@ -239,7 +240,7 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
         return pool->p.dma_dir;
 }
 
-bool page_pool_return_skb_page(struct page *page);
+bool page_pool_return_skb_page(struct page *page, bool napi_safe);
 
 struct page_pool *page_pool_create(const struct page_pool_params *params);
......
@@ -4359,6 +4359,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
         }
 
         list_add_tail(&napi->poll_list, &sd->poll_list);
+        WRITE_ONCE(napi->list_owner, smp_processor_id());
         /* If not called from net_rx_action()
          * we have to raise NET_RX_SOFTIRQ.
          */
@@ -6069,6 +6070,7 @@ bool napi_complete_done(struct napi_struct *n, int work_done)
                 list_del_init(&n->poll_list);
                 local_irq_restore(flags);
         }
+        WRITE_ONCE(n->list_owner, -1);
 
         val = READ_ONCE(n->state);
         do {
@@ -6384,6 +6386,7 @@ void netif_napi_add_weight(struct net_device *dev, struct napi_struct *napi,
 #ifdef CONFIG_NETPOLL
         napi->poll_owner = -1;
 #endif
+        napi->list_owner = -1;
         set_bit(NAPI_STATE_SCHED, &napi->state);
         set_bit(NAPI_STATE_NPSVC, &napi->state);
         list_add_rcu(&napi->dev_list, &dev->napi_list);
......
@@ -19,6 +19,7 @@
 #include <linux/mm.h> /* for put_page() */
 #include <linux/poison.h>
 #include <linux/ethtool.h>
+#include <linux/netdevice.h>
 
 #include <trace/events/page_pool.h>
 
@@ -874,9 +875,11 @@ void page_pool_update_nid(struct page_pool *pool, int new_nid)
 }
 EXPORT_SYMBOL(page_pool_update_nid);
 
-bool page_pool_return_skb_page(struct page *page)
+bool page_pool_return_skb_page(struct page *page, bool napi_safe)
 {
+        struct napi_struct *napi;
         struct page_pool *pp;
+        bool allow_direct;
 
         page = compound_head(page);
 
@@ -892,12 +895,20 @@ bool page_pool_return_skb_page(struct page *page)
         pp = page->pp;
 
+        /* Allow direct recycle if we have reasons to believe that we are
+         * in the same context as the consumer would run, so there's
+         * no possible race.
+         */
+        napi = pp->p.napi;
+        allow_direct = napi_safe && napi &&
+                READ_ONCE(napi->list_owner) == smp_processor_id();
+
         /* Driver set this to memory recycling info. Reset it on recycle.
          * This will *not* work for NIC using a split-page memory model.
          * The page will be returned to the pool here regardless of the
          * 'flipped' fragment being in use or not.
          */
-        page_pool_put_full_page(pp, page, false);
+        page_pool_put_full_page(pp, page, allow_direct);
 
         return true;
 }
......
...@@ -843,7 +843,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe) ...@@ -843,7 +843,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data, bool napi_safe)
{ {
if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle)
return false; return false;
return page_pool_return_skb_page(virt_to_page(data)); return page_pool_return_skb_page(virt_to_page(data), napi_safe);
} }
static void skb_kfree_head(void *head, unsigned int end_offset) static void skb_kfree_head(void *head, unsigned int end_offset)
...@@ -889,7 +889,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason, ...@@ -889,7 +889,7 @@ static void skb_release_data(struct sk_buff *skb, enum skb_drop_reason reason,
} }
for (i = 0; i < shinfo->nr_frags; i++) for (i = 0; i < shinfo->nr_frags; i++)
__skb_frag_unref(&shinfo->frags[i], skb->pp_recycle); napi_frag_unref(&shinfo->frags[i], skb->pp_recycle, napi_safe);
free_head: free_head:
if (shinfo->frag_list) if (shinfo->frag_list)
......