Commit b411ef11 authored by Jesper Dangaard Brouer, committed by David S. Miller

i40e: convert to use generic xdp_frame and xdp_return_frame API

Also convert the i40e driver, which very recently got XDP_REDIRECT support
in commit d9314c47 ("i40e: add support for XDP_REDIRECT").

V7: This patch got added in V7 of this patchset.
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 70280ed9
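
For orientation, the generic frame descriptor this commit switches to can be sketched roughly as below. This is a simplified rendering of the xdp_frame/xdp_mem_info structures introduced earlier in this series (in include/net/xdp.h), shown only so the xdpf->data, xdpf->len and xdpf->mem references in the hunks below have context; the exact fields and types are defined by the XDP core, not by this patch.

/* Simplified sketch -- not the authoritative definition */
struct xdp_mem_info {
	u32 type;			/* enum xdp_mem_type: which allocator owns the memory */
	u32 id;				/* allocator id used by the return path */
};

struct xdp_frame {
	void *data;			/* start of the packet payload */
	u16 len;			/* payload length */
	u16 headroom;			/* headroom left in front of data */
	u16 metasize;			/* XDP metadata size */
	struct xdp_mem_info mem;	/* how to return this frame's memory */
};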
@@ -638,7 +638,8 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
 		if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
 			kfree(tx_buffer->raw_buf);
 		else if (ring_is_xdp(ring))
-			page_frag_free(tx_buffer->raw_buf);
+			xdp_return_frame(tx_buffer->xdpf->data,
+					 &tx_buffer->xdpf->mem);
 		else
 			dev_kfree_skb_any(tx_buffer->skb);
 		if (dma_unmap_len(tx_buffer, len))
@@ -841,7 +842,7 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
 		/* free the skb/XDP data */
 		if (ring_is_xdp(tx_ring))
-			page_frag_free(tx_buf->raw_buf);
+			xdp_return_frame(tx_buf->xdpf->data, &tx_buf->xdpf->mem);
 		else
 			napi_consume_skb(tx_buf->skb, napi_budget);
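
The switch from page_frag_free() to xdp_return_frame() on the TX completion path matters because a frame placed on the XDP ring via XDP_REDIRECT may have been allocated by a different driver with a different memory model; the generic helper frees the buffer according to the xdp_mem_info carried in the frame instead of assuming a page_frag allocation. Roughly, as a simplified sketch of the helper this series adds (not the exact implementation):

/* Simplified sketch of the generic return helper */
void xdp_return_frame(void *data, struct xdp_mem_info *mem)
{
	if (mem->type == MEM_TYPE_PAGE_SHARED)
		page_frag_free(data);		/* page_frag based drivers, e.g. i40e */
	else if (mem->type == MEM_TYPE_PAGE_ORDER0)
		put_page(virt_to_page(data));	/* one page per packet */
}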
@@ -2225,6 +2226,8 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
 	if (!xdp_prog)
 		goto xdp_out;
 
+	prefetchw(xdp->data_hard_start); /* xdp_frame write */
+
 	act = bpf_prog_run_xdp(xdp_prog, xdp);
 	switch (act) {
 	case XDP_PASS:
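
The new prefetchw() warms the cache line at xdp->data_hard_start for writing: when the program returns XDP_TX or XDP_REDIRECT, convert_to_xdp_frame() stores the struct xdp_frame metadata into that headroom. A simplified sketch of what the conversion does (the real helper lives in the XDP core and also accounts for XDP metadata; shown only to motivate the prefetch):

/* Simplified sketch of convert_to_xdp_frame() from this series */
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
{
	struct xdp_frame *xdpf;
	int headroom = xdp->data - xdp->data_hard_start;

	/* Need room in front of the packet to store the frame info */
	if (unlikely(headroom < (int)sizeof(*xdpf)))
		return NULL;

	xdpf = xdp->data_hard_start;	/* written here, hence the prefetchw */
	xdpf->data = xdp->data;
	xdpf->len  = xdp->data_end - xdp->data;
	xdpf->headroom = headroom - sizeof(*xdpf);
	xdpf->mem = xdp->rxq->mem;	/* remember how to free the frame later */

	return xdpf;
}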
@@ -3481,25 +3484,32 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
 static int i40e_xmit_xdp_ring(struct xdp_buff *xdp,
 			      struct i40e_ring *xdp_ring)
 {
-	u32 size = xdp->data_end - xdp->data;
 	u16 i = xdp_ring->next_to_use;
 	struct i40e_tx_buffer *tx_bi;
 	struct i40e_tx_desc *tx_desc;
+	struct xdp_frame *xdpf;
 	dma_addr_t dma;
+	u32 size;
+
+	xdpf = convert_to_xdp_frame(xdp);
+	if (unlikely(!xdpf))
+		return I40E_XDP_CONSUMED;
+
+	size = xdpf->len;
 
 	if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
 		xdp_ring->tx_stats.tx_busy++;
 		return I40E_XDP_CONSUMED;
 	}
 
-	dma = dma_map_single(xdp_ring->dev, xdp->data, size, DMA_TO_DEVICE);
+	dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE);
 	if (dma_mapping_error(xdp_ring->dev, dma))
 		return I40E_XDP_CONSUMED;
 
 	tx_bi = &xdp_ring->tx_bi[i];
 	tx_bi->bytecount = size;
 	tx_bi->gso_segs = 1;
-	tx_bi->raw_buf = xdp->data;
+	tx_bi->xdpf = xdpf;
 
 	/* record length, and DMA address */
 	dma_unmap_len_set(tx_bi, len, size);
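
With the conversion done up front, a frame whose headroom is too small for the xdp_frame info is now rejected with I40E_XDP_CONSUMED, the same code used when the XDP ring is full, so the Rx path simply recycles the buffer. A hypothetical caller-side fragment, only to illustrate how that return value is consumed (the actual switch lives in i40e_run_xdp(); this is not a verbatim excerpt of the driver):

/* Hypothetical illustration of the XDP_TX action on the Rx path */
static int i40e_xdp_tx_action(struct i40e_ring *rx_ring, struct xdp_buff *xdp)
{
	struct i40e_ring *xdp_ring;

	xdp_ring = rx_ring->vsi->xdp_rings[rx_ring->queue_index];

	/* I40E_XDP_CONSUMED is returned both when the ring is full and when
	 * convert_to_xdp_frame() fails; either way the Rx buffer is recycled.
	 */
	return i40e_xmit_xdp_ring(xdp, xdp_ring);
}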
@@ -306,6 +306,7 @@ static inline unsigned int i40e_txd_use_count(unsigned int size)
 struct i40e_tx_buffer {
 	struct i40e_tx_desc *next_to_watch;
 	union {
+		struct xdp_frame *xdpf;
 		struct sk_buff *skb;
 		void *raw_buf;
 	};