Commit 0fadc0a2 authored by Lorenzo Bianconi, committed by David S. Miller

net: socionext: get rid of huge dma sync in netsec_alloc_rx_data

The socionext driver can run on both DMA-coherent and non-coherent devices.
Get rid of the huge dma_sync_single_for_device() in netsec_alloc_rx_data(),
since the driver can now let the page_pool API manage the needed DMA sync.
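With PP_FLAG_DMA_SYNC_DEV set, page_pool syncs for the device only the
region the hardware can actually write, using the offset and max_len the
driver registers at pool creation. As a rough illustration, the recycle
path boils down to the helper below (a simplified sketch of the logic in
net/core/page_pool.c, not the exact kernel source; field names and details
vary across kernel versions):

	/* Simplified sketch of page_pool's internal sync-for-device helper.
	 * Not the exact kernel code; based on net/core/page_pool.c.
	 */
	static void page_pool_dma_sync_for_device(struct page_pool *pool,
						  struct page *page,
						  unsigned int dma_sync_size)
	{
		dma_addr_t dma_addr = page->dma_addr;

		/* never sync more than the driver's declared max payload */
		dma_sync_size = min(dma_sync_size, pool->p.max_len);

		/* sync only the data area, skipping the reserved headroom */
		dma_sync_single_range_for_device(pool->p.dev, dma_addr,
						 pool->p.offset, dma_sync_size,
						 pool->p.dma_dir);
	}

This is why the __page_pool_put_page() calls below pass the actual frame
length: on recycle, only those bytes are synced back for the device,
instead of the full-page sync the driver previously performed on every
allocation.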
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 0c73ffc7
@@ -243,6 +243,7 @@
 			       NET_IP_ALIGN)
 #define NETSEC_RX_BUF_NON_DATA (NETSEC_RXBUF_HEADROOM + \
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define NETSEC_RX_BUF_SIZE	(PAGE_SIZE - NETSEC_RX_BUF_NON_DATA)
 
 #define DESC_SZ	sizeof(struct netsec_de)
 
@@ -719,7 +720,6 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
-	enum dma_data_direction dma_dir;
 	struct page *page;
 
 	page = page_pool_dev_alloc_pages(dring->page_pool);
@@ -734,9 +734,7 @@ static void *netsec_alloc_rx_data(struct netsec_priv *priv,
 	/* Make sure the incoming payload fits in the page for XDP and non-XDP
 	 * cases and reserve enough space for headroom + skb_shared_info
 	 */
-	*desc_len = PAGE_SIZE - NETSEC_RX_BUF_NON_DATA;
-	dma_dir = page_pool_get_dma_dir(dring->page_pool);
-	dma_sync_single_for_device(priv->dev, *dma_handle, *desc_len, dma_dir);
+	*desc_len = NETSEC_RX_BUF_SIZE;
 
 	return page_address(page);
 }
@@ -883,6 +881,8 @@ static u32 netsec_xdp_xmit_back(struct netsec_priv *priv, struct xdp_buff *xdp)
 static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			  struct xdp_buff *xdp)
 {
+	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
+	unsigned int len = xdp->data_end - xdp->data;
 	u32 ret = NETSEC_XDP_PASS;
 	int err;
 	u32 act;
@@ -896,7 +896,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 	case XDP_TX:
 		ret = netsec_xdp_xmit_back(priv, xdp);
 		if (ret != NETSEC_XDP_TX)
-			xdp_return_buff(xdp);
+			__page_pool_put_page(dring->page_pool,
+					     virt_to_head_page(xdp->data),
+					     len, true);
 		break;
 	case XDP_REDIRECT:
 		err = xdp_do_redirect(priv->ndev, xdp, prog);
@@ -904,7 +906,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 			ret = NETSEC_XDP_REDIR;
 		} else {
 			ret = NETSEC_XDP_CONSUMED;
-			xdp_return_buff(xdp);
+			__page_pool_put_page(dring->page_pool,
+					     virt_to_head_page(xdp->data),
+					     len, true);
 		}
 		break;
 	default:
@@ -915,7 +919,9 @@ static u32 netsec_run_xdp(struct netsec_priv *priv, struct bpf_prog *prog,
 		/* fall through -- handle aborts by dropping packet */
 	case XDP_DROP:
 		ret = NETSEC_XDP_CONSUMED;
-		xdp_return_buff(xdp);
+		__page_pool_put_page(dring->page_pool,
+				     virt_to_head_page(xdp->data),
+				     len, true);
 		break;
 	}
 
@@ -1014,7 +1020,8 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget)
 			 * cache state. Since we paid the allocation cost if
 			 * building an skb fails try to put the page into cache
 			 */
-			page_pool_recycle_direct(dring->page_pool, page);
+			__page_pool_put_page(dring->page_pool, page,
+					     pkt_len, true);
 			netif_err(priv, drv, priv->ndev,
 				  "rx failed to build skb\n");
 			break;
@@ -1272,17 +1279,19 @@ static int netsec_setup_rx_dring(struct netsec_priv *priv)
 {
 	struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX];
 	struct bpf_prog *xdp_prog = READ_ONCE(priv->xdp_prog);
-	struct page_pool_params pp_params = { 0 };
+	struct page_pool_params pp_params = {
+		.order = 0,
+		/* internal DMA mapping in page_pool */
+		.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+		.pool_size = DESC_NUM,
+		.nid = NUMA_NO_NODE,
+		.dev = priv->dev,
+		.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE,
+		.offset = NETSEC_RXBUF_HEADROOM,
+		.max_len = NETSEC_RX_BUF_SIZE,
+	};
 	int i, err;
 
-	pp_params.order = 0;
-	/* internal DMA mapping in page_pool */
-	pp_params.flags = PP_FLAG_DMA_MAP;
-	pp_params.pool_size = DESC_NUM;
-	pp_params.nid = NUMA_NO_NODE;
-	pp_params.dev = priv->dev;
-	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-
 	dring->page_pool = page_pool_create(&pp_params);
 	if (IS_ERR(dring->page_pool)) {
 		err = PTR_ERR(dring->page_pool);
...