Commit 19c6f534 authored by Horatiu Vultur's avatar Horatiu Vultur Committed by David S. Miller

net: lan966x: Add support for XDP_TX

Extend lan966x XDP support with the action XDP_TX. In this case, when a
received frame triggers the XDP_TX action, its buffer is moved to the
TX buffers, and a new RX buffer is allocated in its place.
When TX finishes with the frame, it gives the buffer back to the
page pool.
Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 560c7223
...@@ -410,12 +410,17 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight) ...@@ -410,12 +410,17 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
dcb_buf->dev->stats.tx_bytes += dcb_buf->len; dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
dcb_buf->used = false; dcb_buf->used = false;
if (dcb_buf->use_skb) {
dma_unmap_single(lan966x->dev, dma_unmap_single(lan966x->dev,
dcb_buf->dma_addr, dcb_buf->dma_addr,
dcb_buf->len, dcb_buf->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (!dcb_buf->ptp) if (!dcb_buf->ptp)
dev_kfree_skb_any(dcb_buf->skb); napi_consume_skb(dcb_buf->data.skb, weight);
} else {
xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
}
clear = true; clear = true;
} }
...@@ -548,6 +553,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight) ...@@ -548,6 +553,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
lan966x_fdma_rx_free_page(rx); lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx); lan966x_fdma_rx_advance_dcb(rx);
goto allocate_new; goto allocate_new;
case FDMA_TX:
lan966x_fdma_rx_advance_dcb(rx);
continue;
case FDMA_DROP: case FDMA_DROP:
lan966x_fdma_rx_free_page(rx); lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx); lan966x_fdma_rx_advance_dcb(rx);
...@@ -669,6 +677,62 @@ static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use) ...@@ -669,6 +677,62 @@ static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
tx->last_in_use = next_to_use; tx->last_in_use = next_to_use;
} }
/* Transmit an XDP frame on @port's FDMA TX ring (XDP_TX action path).
 *
 * @port: port the frame is transmitted on
 * @xdpf: the converted XDP frame to send
 * @page: the page-pool RX page that backs @xdpf's data
 *
 * Return: 0 on success, or NETDEV_TX_BUSY when no free TX DCB is
 * available (the queue is stopped in that case).
 */
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
struct xdp_frame *xdpf,
struct page *page)
{
struct lan966x *lan966x = port->lan966x;
struct lan966x_tx_dcb_buf *next_dcb_buf;
struct lan966x_tx *tx = &lan966x->tx;
dma_addr_t dma_addr;
int next_to_use;
__be32 *ifh;
int ret = 0;
/* tx_lock serializes DCB allocation/start against the skb xmit path */
spin_lock(&lan966x->tx_lock);
/* Get next index */
next_to_use = lan966x_fdma_get_next_dcb(tx);
if (next_to_use < 0) {
/* Ring full: stop the queue and report busy to the caller */
netif_stop_queue(port->dev);
ret = NETDEV_TX_BUSY;
goto out;
}
/* Generate new IFH */
/* The injection frame header is written into the XDP headroom,
 * immediately in front of the frame data; bypass the analyzer and
 * steer the frame directly to this port.
 */
ifh = page_address(page) + XDP_PACKET_HEADROOM;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
lan966x_ifh_set_bypass(ifh, 1);
lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
/* The page is still DMA-mapped by the page pool; only sync the
 * IFH + frame region for the device before handing it to hardware.
 */
dma_addr = page_pool_get_dma_addr(page);
dma_sync_single_for_device(lan966x->dev, dma_addr + XDP_PACKET_HEADROOM,
xdpf->len + IFH_LEN_BYTES,
DMA_TO_DEVICE);
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use, xdpf->len + IFH_LEN_BYTES,
dma_addr + XDP_PACKET_HEADROOM);
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
/* use_skb = false selects data.xdpf in the union, so TX completion
 * returns the frame via xdp_return_frame_rx_napi() instead of
 * freeing an skb (and skips dma_unmap, since the page pool owns
 * the mapping).
 */
next_dcb_buf->use_skb = false;
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true;
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
/* Start the transmission */
lan966x_fdma_tx_start(tx, next_to_use);
out:
spin_unlock(&lan966x->tx_lock);
return ret;
}
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{ {
struct lan966x_port *port = netdev_priv(dev); struct lan966x_port *port = netdev_priv(dev);
...@@ -724,7 +788,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev) ...@@ -724,7 +788,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
/* Fill up the buffer */ /* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use]; next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->skb = skb; next_dcb_buf->use_skb = true;
next_dcb_buf->data.skb = skb;
next_dcb_buf->len = skb->len; next_dcb_buf->len = skb->len;
next_dcb_buf->dma_addr = dma_addr; next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true; next_dcb_buf->used = true;
......
...@@ -302,13 +302,13 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb, ...@@ -302,13 +302,13 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
} }
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass) void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{ {
packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1, packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0); IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
} }
static void lan966x_ifh_set_port(void *ifh, u64 bypass) void lan966x_ifh_set_port(void *ifh, u64 bypass)
{ {
packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1, packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
IFH_POS_DSTS, IFH_LEN * 4, PACK, 0); IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
......
...@@ -105,11 +105,13 @@ enum macaccess_entry_type { ...@@ -105,11 +105,13 @@ enum macaccess_entry_type {
* FDMA_PASS, frame is valid and can be used * FDMA_PASS, frame is valid and can be used
* FDMA_ERROR, something went wrong, stop getting more frames * FDMA_ERROR, something went wrong, stop getting more frames
* FDMA_DROP, frame is dropped, but continue to get more frames * FDMA_DROP, frame is dropped, but continue to get more frames
* FDMA_TX, frame is given to TX, but continue to get more frames
*/ */
enum lan966x_fdma_action { enum lan966x_fdma_action {
FDMA_PASS = 0, FDMA_PASS = 0,
FDMA_ERROR, FDMA_ERROR,
FDMA_DROP, FDMA_DROP,
FDMA_TX,
}; };
struct lan966x_port; struct lan966x_port;
...@@ -175,10 +177,14 @@ struct lan966x_rx { ...@@ -175,10 +177,14 @@ struct lan966x_rx {
struct lan966x_tx_dcb_buf { struct lan966x_tx_dcb_buf {
dma_addr_t dma_addr; dma_addr_t dma_addr;
struct net_device *dev; struct net_device *dev;
union {
struct sk_buff *skb; struct sk_buff *skb;
struct xdp_frame *xdpf;
} data;
u32 len; u32 len;
u32 used : 1; u32 used : 1;
u32 ptp : 1; u32 ptp : 1;
u32 use_skb : 1;
}; };
struct lan966x_tx { struct lan966x_tx {
...@@ -360,6 +366,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb); ...@@ -360,6 +366,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port); void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp); void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
void lan966x_ifh_set_port(void *ifh, u64 bypass);
void lan966x_stats_get(struct net_device *dev, void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats); struct rtnl_link_stats64 *stats);
...@@ -460,6 +468,9 @@ u32 lan966x_ptp_get_period_ps(void); ...@@ -460,6 +468,9 @@ u32 lan966x_ptp_get_period_ps(void);
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts); int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev); int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
struct xdp_frame *frame,
struct page *page);
int lan966x_fdma_change_mtu(struct lan966x *lan966x); int lan966x_fdma_change_mtu(struct lan966x *lan966x);
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev); void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev); void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
......
...@@ -54,6 +54,7 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len) ...@@ -54,6 +54,7 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
{ {
struct bpf_prog *xdp_prog = port->xdp_prog; struct bpf_prog *xdp_prog = port->xdp_prog;
struct lan966x *lan966x = port->lan966x; struct lan966x *lan966x = port->lan966x;
struct xdp_frame *xdpf;
struct xdp_buff xdp; struct xdp_buff xdp;
u32 act; u32 act;
...@@ -66,6 +67,13 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len) ...@@ -66,6 +67,13 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
switch (act) { switch (act) {
case XDP_PASS: case XDP_PASS:
return FDMA_PASS; return FDMA_PASS;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp);
if (!xdpf)
return FDMA_DROP;
return lan966x_fdma_xmit_xdpf(port, xdpf, page) ?
FDMA_DROP : FDMA_TX;
default: default:
bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act); bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
fallthrough; fallthrough;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment