Commit 8781994a authored by David S. Miller

Merge branch 'lan966x-extend-xdp-support'

Horatiu Vultur says:

====================
net: lan966x: Extend xdp support

Extend the current XDP support in lan966x with the XDP_TX and
XDP_REDIRECT actions.
The first patches prepare the ground so that the new actions are easier
to add: reserve XDP_PACKET_HEADROOM, introduce helper functions, and use
the correct dma_dir for the page pool.
The last two patches introduce the XDP_TX and XDP_REDIRECT actions
themselves.
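For orientation, the RX verdict handling added by the series boils down
to the usual XDP dispatch. This is only a condensed sketch of what
lan966x_xdp_run() in the diff below does; the FDMA_* codes and
lan966x_fdma_xmit_xdpf() are driver names, the rest is the generic
kernel XDP API:

static int xdp_run_sketch(struct lan966x_port *port, struct xdp_buff *xdp,
			  struct page *page)
{
	struct xdp_frame *xdpf;

	switch (bpf_prog_run_xdp(port->xdp_prog, xdp)) {
	case XDP_PASS:
		return FDMA_PASS;	/* keep building an skb as before */
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (!xdpf)
			return FDMA_DROP;
		/* page stays owned by the page pool, no extra DMA mapping */
		return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
		       FDMA_DROP : FDMA_TX;
	case XDP_REDIRECT:
		if (xdp_do_redirect(port->dev, xdp, port->xdp_prog))
			return FDMA_DROP;
		/* xdp_do_flush() runs at the end of the NAPI poll */
		return FDMA_REDIRECT;
	default:
		return FDMA_DROP;
	}
}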

v4->v5:
- declare the loop iterators inside the for loops
- narrow the scope of the port variable inside
  lan966x_fdma_rx_alloc_page_pool()
- create a union for skb and xdpf inside struct lan966x_tx_dcb_buf

v3->v4:
- use napi_consume_skb instead of dev_kfree_skb_any
- arrange the members of struct lan966x_tx_dcb_buf so the struct has no
  holes (see the layout sketch after this list)
- fix the check that decides whether the page pool needs to be recreated
  when an XDP program is added
- change the type of len in struct lan966x_tx_dcb_buf to u32
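On the "no holes" point: assuming a 64-bit build with 8-byte pointers
and dma_addr_t (an assumption for illustration only, struct names below
are made up), member order decides how much padding the compiler
inserts, roughly pahole-style:

/* careless order: 4-byte hole after len, 4 bytes tail padding = 40 bytes */
struct tx_dcb_buf_bad {
	struct net_device *dev;		/* offset  0, size 8 */
	u32 len;			/* offset  8, size 4 */
					/* 4-byte hole */
	void *data;			/* offset 16, size 8 */
	dma_addr_t dma_addr;		/* offset 24, size 8 */
	u32 flags;			/* offset 32, size 4 */
					/* 4 bytes tail padding */
};

/* the order used in the struct below: 32 bytes, no holes */
struct tx_dcb_buf_good {
	struct net_device *dev;		/* offset  0, size 8 */
	void *data;			/* offset  8, size 8 */
	dma_addr_t dma_addr;		/* offset 16, size 8 */
	u32 len;			/* offset 24, size 4 */
	u32 flags;			/* offset 28, size 4 */
};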

v2->v3:
- make sure to update the rxq memory model
- update the page pool DMA direction when an XDP program is attached
- in case of action XDP_TX, give the page back to the page pool for reuse
- in case of action XDP_REDIRECT, map the frame and make sure to unmap it
  once it has been transmitted (see the DMA sketch after this list)
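A condensed view of the two DMA strategies described above; the full
logic lives in lan966x_fdma_xmit_xdpf() in the diff below, where dma_map
is true only on the ndo_xdp_xmit/XDP_REDIRECT path and the surrounding
variables (lan966x, xdpf, page, dma_addr) come from that function:

	if (dma_map) {
		/* Frame redirected from elsewhere: map it now and unmap it
		 * in lan966x_fdma_tx_clear_buf() once the DCB completes.
		 */
		dma_addr = dma_map_single(lan966x->dev,
					  xdpf->data - IFH_LEN_BYTES,
					  xdpf->len + IFH_LEN_BYTES,
					  DMA_TO_DEVICE);
	} else {
		/* XDP_TX: the page is still owned by the page pool and is
		 * already mapped DMA_BIDIRECTIONAL, so a sync is enough and
		 * the page is recycled after transmission.
		 */
		dma_addr = page_pool_get_dma_addr(page);
		dma_sync_single_for_device(lan966x->dev,
					   dma_addr + XDP_PACKET_HEADROOM,
					   xdpf->len + IFH_LEN_BYTES,
					   DMA_TO_DEVICE);
	}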

v1->v2:
- use skb_reserve() instead of using skb_put() and skb_pull()
- make sure that data_len doesn't include XDP_PACKET_HEADROOM
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 46da4aa2 a825b611
// SPDX-License-Identifier: GPL-2.0+
#include <linux/bpf.h>
#include <linux/filter.h>
#include "lan966x_main.h"
static int lan966x_fdma_channel_active(struct lan966x *lan966x)
@@ -16,7 +19,7 @@ static struct page *lan966x_fdma_rx_alloc_page(struct lan966x_rx *rx,
if (unlikely(!page))
return NULL;
db->dataptr = page_pool_get_dma_addr(page);
db->dataptr = page_pool_get_dma_addr(page) + XDP_PACKET_HEADROOM;
return page;
}
@@ -72,12 +75,28 @@ static int lan966x_fdma_rx_alloc_page_pool(struct lan966x_rx *rx)
.nid = NUMA_NO_NODE,
.dev = lan966x->dev,
.dma_dir = DMA_FROM_DEVICE,
.offset = 0,
.offset = XDP_PACKET_HEADROOM,
.max_len = rx->max_mtu -
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)),
};
if (lan966x_xdp_present(lan966x))
pp_params.dma_dir = DMA_BIDIRECTIONAL;
rx->page_pool = page_pool_create(&pp_params);
for (int i = 0; i < lan966x->num_phys_ports; ++i) {
struct lan966x_port *port;
if (!lan966x->ports[i])
continue;
port = lan966x->ports[i];
xdp_rxq_info_unreg_mem_model(&port->xdp_rxq);
xdp_rxq_info_reg_mem_model(&port->xdp_rxq, MEM_TYPE_PAGE_POOL,
rx->page_pool);
}
return PTR_ERR_OR_ZERO(rx->page_pool);
}
@@ -372,11 +391,14 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
{
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_tx_dcb_buf *dcb_buf;
struct xdp_frame_bulk bq;
struct lan966x_db *db;
unsigned long flags;
bool clear = false;
int i;
xdp_frame_bulk_init(&bq);
spin_lock_irqsave(&lan966x->tx_lock, flags);
for (i = 0; i < FDMA_DCB_MAX; ++i) {
dcb_buf = &tx->dcbs_buf[i];
@@ -389,19 +411,35 @@ static void lan966x_fdma_tx_clear_buf(struct lan966x *lan966x, int weight)
continue;
dcb_buf->dev->stats.tx_packets++;
dcb_buf->dev->stats.tx_bytes += dcb_buf->skb->len;
dcb_buf->dev->stats.tx_bytes += dcb_buf->len;
dcb_buf->used = false;
dma_unmap_single(lan966x->dev,
dcb_buf->dma_addr,
dcb_buf->skb->len,
DMA_TO_DEVICE);
if (!dcb_buf->ptp)
dev_kfree_skb_any(dcb_buf->skb);
if (dcb_buf->use_skb) {
dma_unmap_single(lan966x->dev,
dcb_buf->dma_addr,
dcb_buf->len,
DMA_TO_DEVICE);
if (!dcb_buf->ptp)
napi_consume_skb(dcb_buf->data.skb, weight);
} else {
if (dcb_buf->xdp_ndo)
dma_unmap_single(lan966x->dev,
dcb_buf->dma_addr,
dcb_buf->len,
DMA_TO_DEVICE);
if (dcb_buf->xdp_ndo)
xdp_return_frame_bulk(dcb_buf->data.xdpf, &bq);
else
xdp_return_frame_rx_napi(dcb_buf->data.xdpf);
}
clear = true;
}
xdp_flush_frame_bulk(&bq);
if (clear)
lan966x_fdma_wakeup_netdev(lan966x);
@@ -432,11 +470,13 @@ static int lan966x_fdma_rx_check_frame(struct lan966x_rx *rx, u64 *src_port)
if (unlikely(!page))
return FDMA_ERROR;
dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr,
dma_sync_single_for_cpu(lan966x->dev,
(dma_addr_t)db->dataptr + XDP_PACKET_HEADROOM,
FDMA_DCB_STATUS_BLOCKL(db->status),
DMA_FROM_DEVICE);
lan966x_ifh_get_src_port(page_address(page), src_port);
lan966x_ifh_get_src_port(page_address(page) + XDP_PACKET_HEADROOM,
src_port);
if (WARN_ON(*src_port >= lan966x->num_phys_ports))
return FDMA_ERROR;
@@ -466,6 +506,7 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx,
skb_mark_for_recycle(skb);
skb_reserve(skb, XDP_PACKET_HEADROOM);
skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status));
lan966x_ifh_get_timestamp(skb->data, &timestamp);
@@ -505,6 +546,7 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
int dcb_reload = rx->dcb_index;
struct lan966x_rx_dcb *old_dcb;
struct lan966x_db *db;
bool redirect = false;
struct sk_buff *skb;
struct page *page;
int counter = 0;
@@ -527,6 +569,12 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx);
goto allocate_new;
case FDMA_REDIRECT:
redirect = true;
fallthrough;
case FDMA_TX:
lan966x_fdma_rx_advance_dcb(rx);
continue;
case FDMA_DROP:
lan966x_fdma_rx_free_page(rx);
lan966x_fdma_rx_advance_dcb(rx);
@@ -563,6 +611,9 @@ static int lan966x_fdma_napi_poll(struct napi_struct *napi, int weight)
if (counter < weight && napi_complete_done(napi, counter))
lan_wr(0xff, lan966x, FDMA_INTR_DB_ENA);
if (redirect)
xdp_do_flush();
return counter;
}
@@ -607,14 +658,139 @@ static int lan966x_fdma_get_next_dcb(struct lan966x_tx *tx)
return -1;
}
static void lan966x_fdma_tx_setup_dcb(struct lan966x_tx *tx,
int next_to_use, int len,
dma_addr_t dma_addr)
{
struct lan966x_tx_dcb *next_dcb;
struct lan966x_db *next_db;
next_dcb = &tx->dcbs[next_to_use];
next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
next_db = &next_dcb->db[0];
next_db->dataptr = dma_addr;
next_db->status = FDMA_DCB_STATUS_SOF |
FDMA_DCB_STATUS_EOF |
FDMA_DCB_STATUS_INTR |
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(len);
}
static void lan966x_fdma_tx_start(struct lan966x_tx *tx, int next_to_use)
{
struct lan966x *lan966x = tx->lan966x;
struct lan966x_tx_dcb *dcb;
if (likely(lan966x->tx.activated)) {
/* Connect current dcb to the next db */
dcb = &tx->dcbs[tx->last_in_use];
dcb->nextptr = tx->dma + (next_to_use *
sizeof(struct lan966x_tx_dcb));
lan966x_fdma_tx_reload(tx);
} else {
/* Because it is first time, then just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
/* Move to next dcb because this last in use */
tx->last_in_use = next_to_use;
}
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
struct xdp_frame *xdpf,
struct page *page,
bool dma_map)
{
struct lan966x *lan966x = port->lan966x;
struct lan966x_tx_dcb_buf *next_dcb_buf;
struct lan966x_tx *tx = &lan966x->tx;
dma_addr_t dma_addr;
int next_to_use;
__be32 *ifh;
int ret = 0;
spin_lock(&lan966x->tx_lock);
/* Get next index */
next_to_use = lan966x_fdma_get_next_dcb(tx);
if (next_to_use < 0) {
netif_stop_queue(port->dev);
ret = NETDEV_TX_BUSY;
goto out;
}
/* Generate new IFH */
if (dma_map) {
if (xdpf->headroom < IFH_LEN_BYTES) {
ret = NETDEV_TX_OK;
goto out;
}
ifh = xdpf->data - IFH_LEN_BYTES;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
lan966x_ifh_set_bypass(ifh, 1);
lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
dma_addr = dma_map_single(lan966x->dev,
xdpf->data - IFH_LEN_BYTES,
xdpf->len + IFH_LEN_BYTES,
DMA_TO_DEVICE);
if (dma_mapping_error(lan966x->dev, dma_addr)) {
ret = NETDEV_TX_OK;
goto out;
}
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use,
xdpf->len + IFH_LEN_BYTES,
dma_addr);
} else {
ifh = page_address(page) + XDP_PACKET_HEADROOM;
memset(ifh, 0x0, sizeof(__be32) * IFH_LEN);
lan966x_ifh_set_bypass(ifh, 1);
lan966x_ifh_set_port(ifh, BIT_ULL(port->chip_port));
dma_addr = page_pool_get_dma_addr(page);
dma_sync_single_for_device(lan966x->dev,
dma_addr + XDP_PACKET_HEADROOM,
xdpf->len + IFH_LEN_BYTES,
DMA_TO_DEVICE);
/* Setup next dcb */
lan966x_fdma_tx_setup_dcb(tx, next_to_use,
xdpf->len + IFH_LEN_BYTES,
dma_addr + XDP_PACKET_HEADROOM);
}
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->use_skb = false;
next_dcb_buf->data.xdpf = xdpf;
next_dcb_buf->xdp_ndo = dma_map;
next_dcb_buf->len = xdpf->len + IFH_LEN_BYTES;
next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true;
next_dcb_buf->ptp = false;
next_dcb_buf->dev = port->dev;
/* Start the transmission */
lan966x_fdma_tx_start(tx, next_to_use);
out:
spin_unlock(&lan966x->tx_lock);
return ret;
}
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
{
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct lan966x_tx_dcb_buf *next_dcb_buf;
struct lan966x_tx_dcb *next_dcb, *dcb;
struct lan966x_tx *tx = &lan966x->tx;
struct lan966x_db *next_db;
int needed_headroom;
int needed_tailroom;
dma_addr_t dma_addr;
@@ -660,20 +836,14 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
}
/* Setup next dcb */
next_dcb = &tx->dcbs[next_to_use];
next_dcb->nextptr = FDMA_DCB_INVALID_DATA;
next_db = &next_dcb->db[0];
next_db->dataptr = dma_addr;
next_db->status = FDMA_DCB_STATUS_SOF |
FDMA_DCB_STATUS_EOF |
FDMA_DCB_STATUS_INTR |
FDMA_DCB_STATUS_BLOCKO(0) |
FDMA_DCB_STATUS_BLOCKL(skb->len);
lan966x_fdma_tx_setup_dcb(tx, next_to_use, skb->len, dma_addr);
/* Fill up the buffer */
next_dcb_buf = &tx->dcbs_buf[next_to_use];
next_dcb_buf->skb = skb;
next_dcb_buf->use_skb = true;
next_dcb_buf->data.skb = skb;
next_dcb_buf->xdp_ndo = false;
next_dcb_buf->len = skb->len;
next_dcb_buf->dma_addr = dma_addr;
next_dcb_buf->used = true;
next_dcb_buf->ptp = false;
@@ -683,21 +853,8 @@ int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev)
LAN966X_SKB_CB(skb)->rew_op == IFH_REW_OP_TWO_STEP_PTP)
next_dcb_buf->ptp = true;
if (likely(lan966x->tx.activated)) {
/* Connect current dcb to the next db */
dcb = &tx->dcbs[tx->last_in_use];
dcb->nextptr = tx->dma + (next_to_use *
sizeof(struct lan966x_tx_dcb));
lan966x_fdma_tx_reload(tx);
} else {
/* Because it is first time, then just activate */
lan966x->tx.activated = true;
lan966x_fdma_tx_activate(tx);
}
/* Move to next dcb because this last in use */
tx->last_in_use = next_to_use;
/* Start the transmission */
lan966x_fdma_tx_start(tx, next_to_use);
return NETDEV_TX_OK;
@@ -786,19 +943,15 @@ static int lan966x_fdma_get_max_frame(struct lan966x *lan966x)
return lan966x_fdma_get_max_mtu(lan966x) +
IFH_LEN_BYTES +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
VLAN_HLEN * 2;
VLAN_HLEN * 2 +
XDP_PACKET_HEADROOM;
}
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
static int __lan966x_fdma_reload(struct lan966x *lan966x, int max_mtu)
{
int max_mtu;
int err;
u32 val;
max_mtu = lan966x_fdma_get_max_frame(lan966x);
if (max_mtu == lan966x->rx.max_mtu)
return 0;
/* Disable the CPU port */
lan_rmw(QSYS_SW_PORT_MODE_PORT_ENA_SET(0),
QSYS_SW_PORT_MODE_PORT_ENA,
@@ -824,6 +977,25 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x)
return err;
}
int lan966x_fdma_change_mtu(struct lan966x *lan966x)
{
int max_mtu;
max_mtu = lan966x_fdma_get_max_frame(lan966x);
if (max_mtu == lan966x->rx.max_mtu)
return 0;
return __lan966x_fdma_reload(lan966x, max_mtu);
}
int lan966x_fdma_reload_page_pool(struct lan966x *lan966x)
{
int max_mtu;
max_mtu = lan966x_fdma_get_max_frame(lan966x);
return __lan966x_fdma_reload(lan966x, max_mtu);
}
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev)
{
if (lan966x->fdma_ndev)
......
@@ -302,13 +302,13 @@ static int lan966x_port_ifh_xmit(struct sk_buff *skb,
return NETDEV_TX_BUSY;
}
static void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
void lan966x_ifh_set_bypass(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_BYPASS + IFH_WID_BYPASS - 1,
IFH_POS_BYPASS, IFH_LEN * 4, PACK, 0);
}
static void lan966x_ifh_set_port(void *ifh, u64 bypass)
void lan966x_ifh_set_port(void *ifh, u64 bypass)
{
packing(ifh, &bypass, IFH_POS_DSTS + IFH_WID_DSTS - 1,
IFH_POS_DSTS, IFH_LEN * 4, PACK, 0);
@@ -469,6 +469,7 @@ static const struct net_device_ops lan966x_port_netdev_ops = {
.ndo_eth_ioctl = lan966x_port_ioctl,
.ndo_setup_tc = lan966x_tc_setup,
.ndo_bpf = lan966x_xdp,
.ndo_xdp_xmit = lan966x_xdp_xmit,
};
bool lan966x_netdevice_check(const struct net_device *dev)
......
@@ -105,11 +105,15 @@ enum macaccess_entry_type {
* FDMA_PASS, frame is valid and can be used
* FDMA_ERROR, something went wrong, stop getting more frames
* FDMA_DROP, frame is dropped, but continue to get more frames
* FDMA_TX, frame is given to TX, but continue to get more frames
* FDMA_REDIRECT, frame is given to TX, but continue to get more frames
*/
enum lan966x_fdma_action {
FDMA_PASS = 0,
FDMA_ERROR,
FDMA_DROP,
FDMA_TX,
FDMA_REDIRECT,
};
struct lan966x_port;
@@ -173,11 +177,17 @@ struct lan966x_rx {
};
struct lan966x_tx_dcb_buf {
struct net_device *dev;
struct sk_buff *skb;
dma_addr_t dma_addr;
bool used;
bool ptp;
struct net_device *dev;
union {
struct sk_buff *skb;
struct xdp_frame *xdpf;
} data;
u32 len;
u32 used : 1;
u32 ptp : 1;
u32 use_skb : 1;
u32 xdp_ndo : 1;
};
struct lan966x_tx {
@@ -359,6 +369,8 @@ bool lan966x_hw_offload(struct lan966x *lan966x, u32 port, struct sk_buff *skb);
void lan966x_ifh_get_src_port(void *ifh, u64 *src_port);
void lan966x_ifh_get_timestamp(void *ifh, u64 *timestamp);
void lan966x_ifh_set_bypass(void *ifh, u64 bypass);
void lan966x_ifh_set_port(void *ifh, u64 bypass);
void lan966x_stats_get(struct net_device *dev,
struct rtnl_link_stats64 *stats);
@@ -459,12 +471,17 @@ u32 lan966x_ptp_get_period_ps(void);
int lan966x_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
int lan966x_fdma_xmit(struct sk_buff *skb, __be32 *ifh, struct net_device *dev);
int lan966x_fdma_xmit_xdpf(struct lan966x_port *port,
struct xdp_frame *frame,
struct page *page,
bool dma_map);
int lan966x_fdma_change_mtu(struct lan966x *lan966x);
void lan966x_fdma_netdev_init(struct lan966x *lan966x, struct net_device *dev);
void lan966x_fdma_netdev_deinit(struct lan966x *lan966x, struct net_device *dev);
int lan966x_fdma_init(struct lan966x *lan966x);
void lan966x_fdma_deinit(struct lan966x *lan966x);
irqreturn_t lan966x_fdma_irq_handler(int irq, void *args);
int lan966x_fdma_reload_page_pool(struct lan966x *lan966x);
int lan966x_lag_port_join(struct lan966x_port *port,
struct net_device *brport_dev,
@@ -555,6 +572,11 @@ int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp);
int lan966x_xdp_run(struct lan966x_port *port,
struct page *page,
u32 data_len);
int lan966x_xdp_xmit(struct net_device *dev,
int n,
struct xdp_frame **frames,
u32 flags);
bool lan966x_xdp_present(struct lan966x *lan966x);
static inline bool lan966x_xdp_port_present(struct lan966x_port *port)
{
return !!port->xdp_prog;
......
@@ -11,6 +11,8 @@ static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
struct lan966x_port *port = netdev_priv(dev);
struct lan966x *lan966x = port->lan966x;
struct bpf_prog *old_prog;
bool old_xdp, new_xdp;
int err;
if (!lan966x->fdma) {
NL_SET_ERR_MSG_MOD(xdp->extack,
@@ -18,7 +20,20 @@ static int lan966x_xdp_setup(struct net_device *dev, struct netdev_bpf *xdp)
return -EOPNOTSUPP;
}
old_xdp = lan966x_xdp_present(lan966x);
old_prog = xchg(&port->xdp_prog, xdp->prog);
new_xdp = lan966x_xdp_present(lan966x);
if (old_xdp == new_xdp)
goto out;
err = lan966x_fdma_reload_page_pool(lan966x);
if (err) {
xchg(&port->xdp_prog, old_prog);
return err;
}
out:
if (old_prog)
bpf_prog_put(old_prog);
@@ -35,21 +50,57 @@ int lan966x_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
int lan966x_xdp_xmit(struct net_device *dev,
int n,
struct xdp_frame **frames,
u32 flags)
{
struct lan966x_port *port = netdev_priv(dev);
int nxmit = 0;
for (int i = 0; i < n; ++i) {
struct xdp_frame *xdpf = frames[i];
int err;
err = lan966x_fdma_xmit_xdpf(port, xdpf, NULL, true);
if (err)
break;
nxmit++;
}
return nxmit;
}
int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
{
struct bpf_prog *xdp_prog = port->xdp_prog;
struct lan966x *lan966x = port->lan966x;
struct xdp_frame *xdpf;
struct xdp_buff xdp;
u32 act;
xdp_init_buff(&xdp, PAGE_SIZE << lan966x->rx.page_order,
&port->xdp_rxq);
xdp_prepare_buff(&xdp, page_address(page), IFH_LEN_BYTES,
xdp_prepare_buff(&xdp, page_address(page),
IFH_LEN_BYTES + XDP_PACKET_HEADROOM,
data_len - IFH_LEN_BYTES, false);
act = bpf_prog_run_xdp(xdp_prog, &xdp);
switch (act) {
case XDP_PASS:
return FDMA_PASS;
case XDP_TX:
xdpf = xdp_convert_buff_to_frame(&xdp);
if (!xdpf)
return FDMA_DROP;
return lan966x_fdma_xmit_xdpf(port, xdpf, page, false) ?
FDMA_DROP : FDMA_TX;
case XDP_REDIRECT:
if (xdp_do_redirect(port->dev, &xdp, xdp_prog))
return FDMA_DROP;
return FDMA_REDIRECT;
default:
bpf_warn_invalid_xdp_action(port->dev, xdp_prog, act);
fallthrough;
@@ -61,6 +112,19 @@ int lan966x_xdp_run(struct lan966x_port *port, struct page *page, u32 data_len)
}
}
bool lan966x_xdp_present(struct lan966x *lan966x)
{
for (int p = 0; p < lan966x->num_phys_ports; ++p) {
if (!lan966x->ports[p])
continue;
if (lan966x_xdp_port_present(lan966x->ports[p]))
return true;
}
return false;
}
int lan966x_xdp_port_init(struct lan966x_port *port)
{
struct lan966x *lan966x = port->lan966x;
......