Commit afda408b authored by Lorenzo Bianconi's avatar Lorenzo Bianconi Committed by David S. Miller

net: mvneta: move mvneta_run_xdp after descriptors processing

Move the mvneta_run_xdp routine after all descriptor processing. This is a
preliminary patch to enable multi-buffer and JUMBO frame support for
XDP.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ca0e0146
...@@ -2230,12 +2230,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, ...@@ -2230,12 +2230,11 @@ mvneta_run_xdp(struct mvneta_port *pp, struct mvneta_rx_queue *rxq,
return ret; return ret;
} }
static int static void
mvneta_swbm_rx_frame(struct mvneta_port *pp, mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct mvneta_rx_desc *rx_desc, struct mvneta_rx_desc *rx_desc,
struct mvneta_rx_queue *rxq, struct mvneta_rx_queue *rxq,
struct xdp_buff *xdp, struct xdp_buff *xdp,
struct bpf_prog *xdp_prog,
struct page *page, struct page *page,
struct mvneta_stats *stats) struct mvneta_stats *stats)
{ {
...@@ -2244,7 +2243,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, ...@@ -2244,7 +2243,6 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
struct net_device *dev = pp->dev; struct net_device *dev = pp->dev;
enum dma_data_direction dma_dir; enum dma_data_direction dma_dir;
struct skb_shared_info *sinfo; struct skb_shared_info *sinfo;
int ret = 0;
if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) { if (MVNETA_SKB_SIZE(rx_desc->data_size) > PAGE_SIZE) {
len = MVNETA_MAX_RX_BUF_SIZE; len = MVNETA_MAX_RX_BUF_SIZE;
...@@ -2270,13 +2268,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp, ...@@ -2270,13 +2268,8 @@ mvneta_swbm_rx_frame(struct mvneta_port *pp,
sinfo = xdp_get_shared_info_from_buff(xdp); sinfo = xdp_get_shared_info_from_buff(xdp);
sinfo->nr_frags = 0; sinfo->nr_frags = 0;
if (xdp_prog)
ret = mvneta_run_xdp(pp, rxq, xdp_prog, xdp, stats);
rxq->left_size = rx_desc->data_size - len; rxq->left_size = rx_desc->data_size - len;
rx_desc->buf_phys_addr = 0; rx_desc->buf_phys_addr = 0;
return ret;
} }
static void static void
...@@ -2385,20 +2378,15 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2385,20 +2378,15 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
rxq->refill_num++; rxq->refill_num++;
if (rx_status & MVNETA_RXD_FIRST_DESC) { if (rx_status & MVNETA_RXD_FIRST_DESC) {
int err;
/* Check errors only for FIRST descriptor */ /* Check errors only for FIRST descriptor */
if (rx_status & MVNETA_RXD_ERR_SUMMARY) { if (rx_status & MVNETA_RXD_ERR_SUMMARY) {
mvneta_rx_error(pp, rx_desc); mvneta_rx_error(pp, rx_desc);
goto next; goto next;
} }
err = mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf,
xdp_prog, page, &ps);
if (err)
continue;
desc_status = rx_desc->status; desc_status = rx_desc->status;
mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, page,
&ps);
} else { } else {
if (unlikely(!xdp_buf.data_hard_start)) if (unlikely(!xdp_buf.data_hard_start))
continue; continue;
...@@ -2417,6 +2405,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi, ...@@ -2417,6 +2405,10 @@ static int mvneta_rx_swbm(struct napi_struct *napi,
goto next; goto next;
} }
if (xdp_prog &&
mvneta_run_xdp(pp, rxq, xdp_prog, &xdp_buf, &ps))
goto next;
skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status); skb = mvneta_swbm_build_skb(pp, rxq, &xdp_buf, desc_status);
if (IS_ERR(skb)) { if (IS_ERR(skb)) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment