Commit 1614f06e authored by Somnath Kotur, committed by Jakub Kicinski

bnxt_en: Change bnxt_rx_xdp function prototype

Change bnxt_rx_xdp() to take a pointer to the xdp_buff instead of a
copy on the stack.
This is in preparation for the XDP metadata patch, where the BPF
program can change the value of xdp->data_meta and the driver must
see that update.
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Reviewed-by: Andy Gospodarek <andrew.gospodarek@broadcom.com>
Reviewed-by: Michael Chan <michael.chan@broadcom.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Jesper Dangaard Brouer <hawk@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Acked-by: Paolo Abeni <pabeni@redhat.com>
Link: https://lore.kernel.org/r/20240402093753.331120-5-pavan.chebbi@broadcom.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent fba2e4e5
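Why passing by value was a problem: C hands bnxt_rx_xdp() a private copy of the struct xdp_buff, so any pointer the BPF program moves (data, data_meta) is lost when the function returns to bnxt_rx_pkt(). A minimal standalone sketch of the difference (illustrative only; the struct mirrors a slice of struct xdp_buff and is not driver code):

#include <stdio.h>

struct xdp_buff_sketch {                /* stand-in for struct xdp_buff */
	unsigned char *data;
	unsigned char *data_meta;
};

/* By value: the callee mutates a copy; the caller never sees it. */
static void adjust_meta_by_value(struct xdp_buff_sketch xdp)
{
	xdp.data_meta = xdp.data - 4;
}

/* By pointer: the caller observes the moved data_meta. */
static void adjust_meta_by_pointer(struct xdp_buff_sketch *xdp)
{
	xdp->data_meta = xdp->data - 4;
}

int main(void)
{
	unsigned char frame[128];
	struct xdp_buff_sketch xdp = {
		.data = frame + 32, .data_meta = frame + 32,
	};

	adjust_meta_by_value(xdp);
	printf("by value:   meta room = %td\n", xdp.data - xdp.data_meta); /* 0 */

	adjust_meta_by_pointer(&xdp);
	printf("by pointer: meta room = %td\n", xdp.data - xdp.data_meta); /* 4 */
	return 0;
}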
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2104,7 +2104,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
 	}
 
 	if (xdp_active) {
-		if (bnxt_rx_xdp(bp, rxr, cons, xdp, data, &data_ptr, &len, event)) {
+		if (bnxt_rx_xdp(bp, rxr, cons, &xdp, data, &data_ptr, &len, event)) {
 			rc = 1;
 			goto next_rx;
 		}
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
@@ -222,7 +222,7 @@ void bnxt_xdp_buff_frags_free(struct bnxt_rx_ring_info *rxr,
  * false - packet should be passed to the stack.
  */
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+		 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
 		 unsigned int *len, u8 *event)
 {
 	struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog);
@@ -244,9 +244,9 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	txr = rxr->bnapi->tx_ring[0];
 	/* BNXT_RX_PAGE_MODE(bp) when XDP enabled */
-	orig_data = xdp.data;
+	orig_data = xdp->data;
 
-	act = bpf_prog_run_xdp(xdp_prog, &xdp);
+	act = bpf_prog_run_xdp(xdp_prog, xdp);
 
 	tx_avail = bnxt_tx_avail(bp, txr);
 	/* If the tx ring is not full, we must not update the rx producer yet
 	 * because we may still be transmitting on some BDs.
@@ -255,10 +255,10 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 	if (tx_avail != bp->tx_ring_size)
 		*event &= ~BNXT_RX_EVENT;
 
-	*len = xdp.data_end - xdp.data;
-	if (orig_data != xdp.data) {
-		offset = xdp.data - xdp.data_hard_start;
-		*data_ptr = xdp.data_hard_start + offset;
+	*len = xdp->data_end - xdp->data;
+	if (orig_data != xdp->data) {
+		offset = xdp->data - xdp->data_hard_start;
+		*data_ptr = xdp->data_hard_start + offset;
 	}
 
 	switch (act) {
@@ -270,8 +270,8 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		mapping = rx_buf->mapping - bp->rx_dma_offset;
 		*event &= BNXT_TX_CMP_EVENT;
 
-		if (unlikely(xdp_buff_has_frags(&xdp))) {
-			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(&xdp);
+		if (unlikely(xdp_buff_has_frags(xdp))) {
+			struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
 
 			tx_needed += sinfo->nr_frags;
 			*event = BNXT_AGG_EVENT;
@@ -279,7 +279,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		if (tx_avail < tx_needed) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
-			bnxt_xdp_buff_frags_free(rxr, &xdp);
+			bnxt_xdp_buff_frags_free(rxr, xdp);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
 		}
@@ -289,7 +289,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 
 		*event |= BNXT_TX_EVENT;
 		__bnxt_xmit_xdp(bp, txr, mapping + offset, *len,
-				NEXT_RX(rxr->rx_prod), &xdp);
+				NEXT_RX(rxr->rx_prod), xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		return true;
 	case XDP_REDIRECT:
@@ -306,12 +306,12 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		/* if we are unable to allocate a new buffer, abort and reuse */
 		if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
-			bnxt_xdp_buff_frags_free(rxr, &xdp);
+			bnxt_xdp_buff_frags_free(rxr, xdp);
 			bnxt_reuse_rx_data(rxr, cons, page);
 			return true;
 		}
 
-		if (xdp_do_redirect(bp->dev, &xdp, xdp_prog)) {
+		if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) {
 			trace_xdp_exception(bp->dev, xdp_prog, act);
 			page_pool_recycle_direct(rxr->page_pool, page);
 			return true;
@@ -326,7 +326,7 @@ bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
 		trace_xdp_exception(bp->dev, xdp_prog, act);
 		fallthrough;
 	case XDP_DROP:
-		bnxt_xdp_buff_frags_free(rxr, &xdp);
+		bnxt_xdp_buff_frags_free(rxr, xdp);
 		bnxt_reuse_rx_data(rxr, cons, page);
 		break;
 	}
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.h
@@ -18,7 +18,7 @@ struct bnxt_sw_tx_bd *bnxt_xmit_bd(struct bnxt *bp,
 				   struct xdp_buff *xdp);
 void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget);
 bool bnxt_rx_xdp(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, u16 cons,
-		 struct xdp_buff xdp, struct page *page, u8 **data_ptr,
+		 struct xdp_buff *xdp, struct page *page, u8 **data_ptr,
 		 unsigned int *len, u8 *event);
 int bnxt_xdp(struct net_device *dev, struct netdev_bpf *xdp);
 int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
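For context, this is the kind of program the follow-up metadata patch is meant to support: bpf_xdp_adjust_meta() moves ctx->data_meta in front of the packet, and the driver can only propagate that move if bnxt_rx_xdp() works on the caller's xdp_buff rather than a copy. A hedged sketch of such a program (the program name and 4-byte metadata layout are illustrative, not taken from this series):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_mark_meta(struct xdp_md *ctx)
{
	void *data, *data_meta;
	__u32 *meta;

	/* Reserve 4 bytes of metadata in front of the packet data. */
	if (bpf_xdp_adjust_meta(ctx, -(int)sizeof(*meta)))
		return XDP_PASS;

	data = (void *)(long)ctx->data;
	data_meta = (void *)(long)ctx->data_meta;
	meta = data_meta;

	/* Verifier-mandated bounds check before writing. */
	if ((void *)(meta + 1) > data)
		return XDP_PASS;

	*meta = 0xdeadbeef;	/* example mark for the driver/stack to consume */
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";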