Commit 95ae31a9 authored by David S. Miller

Merge branch 'nfp-ring-reconfig-and-xdp-support'

Jakub Kicinski says:

====================
ring reconfiguration and XDP support

This set adds support for the ethtool channel API and XDP.
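
For reference, the general shape of an ethtool .get_channels callback is
roughly the following (the example_priv structure and its field names are
hypothetical stand-ins, not the actual nfp_net layout):

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>

struct example_priv {				/* hypothetical private data */
	unsigned int max_r_vecs;		/* max ring pairs supported */
	unsigned int num_rx_rings;
	unsigned int num_tx_rings;
};

static void example_get_channels(struct net_device *netdev,
				 struct ethtool_channels *channels)
{
	struct example_priv *priv = netdev_priv(netdev);
	unsigned int combined;

	/* rings sharing an IRQ vector are reported as "combined" */
	combined = min(priv->num_rx_rings, priv->num_tx_rings);

	channels->max_combined = priv->max_r_vecs;
	channels->max_rx = priv->max_r_vecs;
	channels->max_tx = priv->max_r_vecs;
	channels->combined_count = combined;
	channels->rx_count = priv->num_rx_rings - combined;
	channels->tx_count = priv->num_tx_rings - combined;
}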

I kick off with the ethtool get_channels() implementation.
set_channels() needs some preparation to get right.  I follow
the prepare/commit paradigm and allocate all resources before
stopping the device.  This has already been done for ndo_change_mtu
and ethtool set_ringparam(), so it makes sense now to consolidate all
the required logic in one place.
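
Roughly, the reconfiguration path follows this prepare/commit shape (a
sketch only - the example_* helpers are illustrative placeholders, the
real implementation is nfp_net_ring_reconfig() in the diff below):

static int example_ring_reconfig(struct example_priv *priv,
				 struct example_ring_set *rx,
				 struct example_ring_set *tx)
{
	int err;

	/* prepare: allocate all new rings and buffers while still running,
	 * so a failed allocation leaves the old configuration untouched
	 */
	err = example_rings_prepare(priv, rx, tx);
	if (err)
		return err;

	/* commit: stop traffic, swap in the new ring sets, restart FW */
	example_close_stack(priv);
	example_clear_config_and_disable(priv);
	example_rings_swap(priv, rx, tx);	/* rx/tx now hold the old set */
	err = example_set_config_and_enable(priv);
	if (err) {
		/* fall back: swap the old rings back in and re-enable */
		example_rings_swap(priv, rx, tx);
		example_set_config_and_enable(priv);
	}

	example_rings_free(priv, rx, tx);	/* free whichever set is unused */
	example_open_stack(priv);
	return err;
}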

XDP support requires splitting TX rings into two classes - one
for the stack and one for XDP.  The ring structures are identical;
the differences are in how they are connected to the IRQ vector
structs and how completion/cleanup works.  When XDP is enabled
I switch from the frag allocator to page-per-packet allocation and
map buffers DMA_BIDIRECTIONAL, so XDP_TX can send the same buffer
back out without remapping.
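
The allocation and mapping difference boils down to something like this
(simplified sketch, ring bookkeeping omitted; this is not the exact driver
helper):

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/skbuff.h>

static void *example_rx_alloc(struct device *dev, unsigned int bufsz,
			      bool xdp, dma_addr_t *dma_addr)
{
	void *frag;

	if (!xdp) {
		frag = netdev_alloc_frag(bufsz);	/* stack: page frag */
	} else {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_COLD);

		frag = page ? page_address(page) : NULL; /* XDP: full page */
	}
	if (!frag)
		return NULL;

	*dma_addr = dma_map_single(dev, frag, bufsz,
				   xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		if (!xdp)
			skb_free_frag(frag);
		else
			__free_page(virt_to_page(frag));
		return NULL;
	}
	return frag;
}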

Last but not least XDP offload is added (the patch just takes
care of the small formal differences between cls_bpf and XDP).
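
For context, this is the kind of minimal XDP program the offload path has
to handle - it only loads xdp_md data/data_end (the new mem_ldx4_xdp()
case) and returns one of the verdicts remapped by nfp_outro_xdp().
Illustrative only, SEC()/license boilerplate omitted:

#include <linux/bpf.h>

int xdp_drop_runts(struct xdp_md *ctx)
{
	void *data = (void *)(long)ctx->data;
	void *data_end = (void *)(long)ctx->data_end;

	if (data + 64 > data_end)	/* drop frames shorter than 64B */
		return XDP_DROP;
	return XDP_PASS;
}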

There is a tiny & trivial DebugFS patch in the mix; I hope it can
be taken via net-next provided we have the right Acks.

Resending with improved commit message and CCing more people on patch 10.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 013724e9 6d677075
@@ -62,6 +62,7 @@ enum nfp_bpf_action_type {
NN_ACT_TC_DROP, NN_ACT_TC_DROP,
NN_ACT_TC_REDIR, NN_ACT_TC_REDIR,
NN_ACT_DIRECT, NN_ACT_DIRECT,
NN_ACT_XDP,
}; };
/* Software register representation, hardware encoding in asm.h */ /* Software register representation, hardware encoding in asm.h */
...
@@ -1126,7 +1126,7 @@ static int data_ind_ld4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
meta->insn.src_reg * 2, true, 4); meta->insn.src_reg * 2, true, 4);
} }
static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{ {
if (meta->insn.off == offsetof(struct sk_buff, len)) if (meta->insn.off == offsetof(struct sk_buff, len))
emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2), emit_alu(nfp_prog, reg_both(meta->insn.dst_reg * 2),
@@ -1134,12 +1134,42 @@ static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
else else
return -ENOTSUPP; return -ENOTSUPP;
wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0); return 0;
}
static int mem_ldx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
u32 dst = reg_both(meta->insn.dst_reg * 2);
if (meta->insn.off != offsetof(struct xdp_md, data) &&
meta->insn.off != offsetof(struct xdp_md, data_end))
return -ENOTSUPP;
emit_alu(nfp_prog, dst, reg_none(), ALU_OP_NONE, NFP_BPF_ABI_PKT);
if (meta->insn.off == offsetof(struct xdp_md, data))
return 0;
emit_alu(nfp_prog, dst, dst, ALU_OP_ADD, NFP_BPF_ABI_LEN);
return 0; return 0;
} }
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int mem_ldx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
int ret;
if (nfp_prog->act == NN_ACT_XDP)
ret = mem_ldx4_xdp(nfp_prog, meta);
else
ret = mem_ldx4_skb(nfp_prog, meta);
wrp_immed(nfp_prog, reg_both(meta->insn.dst_reg * 2 + 1), 0);
return ret;
}
static int mem_stx4_skb(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{ {
if (meta->insn.off == offsetof(struct sk_buff, mark)) if (meta->insn.off == offsetof(struct sk_buff, mark))
return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2); return wrp_set_mark(nfp_prog, meta->insn.src_reg * 2);
@@ -1147,6 +1177,18 @@ static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
return -ENOTSUPP; return -ENOTSUPP;
} }
static int mem_stx4_xdp(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
return -ENOTSUPP;
}
static int mem_stx4(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{
if (nfp_prog->act == NN_ACT_XDP)
return mem_stx4_xdp(nfp_prog, meta);
return mem_stx4_skb(nfp_prog, meta);
}
static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) static int jump(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta)
{ {
if (meta->insn.off < 0) /* TODO */ if (meta->insn.off < 0) /* TODO */
@@ -1530,6 +1572,47 @@ static void nfp_outro_tc_da(struct nfp_prog *nfp_prog)
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16); emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
} }
static void nfp_outro_xdp(struct nfp_prog *nfp_prog)
{
/* XDP return codes:
* 0 aborted 0x82 -> drop, count as stat3
* 1 drop 0x22 -> drop, count as stat1
* 2 pass 0x11 -> pass, count as stat0
* 3 tx 0x44 -> redir, count as stat2
* * unknown 0x82 -> drop, count as stat3
*/
/* Target for aborts */
nfp_prog->tgt_abort = nfp_prog_current_offset(nfp_prog);
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
emit_alu(nfp_prog, reg_a(0),
reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_imm(0x82), SHF_SC_L_SHF, 16);
/* Target for normal exits */
nfp_prog->tgt_out = nfp_prog_current_offset(nfp_prog);
/* if R0 > 3 jump to abort */
emit_alu(nfp_prog, reg_none(), reg_imm(3), ALU_OP_SUB, reg_b(0));
emit_br(nfp_prog, BR_BLO, nfp_prog->tgt_abort, 0);
wrp_immed(nfp_prog, reg_b(2), 0x44112282);
emit_shf(nfp_prog, reg_a(1),
reg_none(), SHF_OP_NONE, reg_b(0), SHF_SC_L_SHF, 3);
emit_alu(nfp_prog, reg_none(), reg_a(1), ALU_OP_OR, reg_imm(0));
emit_shf(nfp_prog, reg_b(2),
reg_imm(0xff), SHF_OP_AND, reg_b(2), SHF_SC_R_SHF, 0);
emit_br_def(nfp_prog, nfp_prog->tgt_done, 2);
emit_alu(nfp_prog, reg_a(0),
reg_none(), ALU_OP_NONE, NFP_BPF_ABI_FLAGS);
emit_ld_field(nfp_prog, reg_a(0), 0xc, reg_b(2), SHF_SC_L_SHF, 16);
}
static void nfp_outro(struct nfp_prog *nfp_prog) static void nfp_outro(struct nfp_prog *nfp_prog)
{ {
switch (nfp_prog->act) { switch (nfp_prog->act) {
@@ -1540,6 +1623,9 @@ static void nfp_outro(struct nfp_prog *nfp_prog)
case NN_ACT_TC_REDIR: case NN_ACT_TC_REDIR:
nfp_outro_tc_legacy(nfp_prog); nfp_outro_tc_legacy(nfp_prog);
break; break;
case NN_ACT_XDP:
nfp_outro_xdp(nfp_prog);
break;
} }
} }
...
@@ -80,6 +80,9 @@ nfp_bpf_check_exit(struct nfp_prog *nfp_prog,
{ {
const struct bpf_reg_state *reg0 = &env->cur_state.regs[0]; const struct bpf_reg_state *reg0 = &env->cur_state.regs[0];
if (nfp_prog->act == NN_ACT_XDP)
return 0;
if (reg0->type != CONST_IMM) { if (reg0->type != CONST_IMM) {
pr_info("unsupported exit state: %d, imm: %llx\n", pr_info("unsupported exit state: %d, imm: %llx\n",
reg0->type, reg0->imm); reg0->type, reg0->imm);
...
@@ -171,7 +171,10 @@ struct nfp_net_tx_desc {
* on the head's buffer). Equal to skb->len for non-TSO packets. * on the head's buffer). Equal to skb->len for non-TSO packets.
*/ */
struct nfp_net_tx_buf { struct nfp_net_tx_buf {
struct sk_buff *skb; union {
struct sk_buff *skb;
void *frag;
};
dma_addr_t dma_addr; dma_addr_t dma_addr;
short int fidx; short int fidx;
u16 pkt_cnt; u16 pkt_cnt;
@@ -341,6 +344,7 @@ struct nfp_net_rx_ring {
* @napi: NAPI structure for this ring vec * @napi: NAPI structure for this ring vec
* @tx_ring: Pointer to TX ring * @tx_ring: Pointer to TX ring
* @rx_ring: Pointer to RX ring * @rx_ring: Pointer to RX ring
* @xdp_ring: Pointer to an extra TX ring for XDP
* @irq_idx: Index into MSI-X table * @irq_idx: Index into MSI-X table
* @rx_sync: Seqlock for atomic updates of RX stats * @rx_sync: Seqlock for atomic updates of RX stats
* @rx_pkts: Number of received packets * @rx_pkts: Number of received packets
@@ -384,6 +388,8 @@ struct nfp_net_r_vector {
u64 hw_csum_rx_inner_ok; u64 hw_csum_rx_inner_ok;
u64 hw_csum_rx_error; u64 hw_csum_rx_error;
struct nfp_net_tx_ring *xdp_ring;
struct u64_stats_sync tx_sync; struct u64_stats_sync tx_sync;
u64 tx_pkts; u64 tx_pkts;
u64 tx_bytes; u64 tx_bytes;
@@ -429,9 +435,11 @@ struct nfp_stat_pair {
* @is_vf: Is the driver attached to a VF? * @is_vf: Is the driver attached to a VF?
* @fw_loaded: Is the firmware loaded? * @fw_loaded: Is the firmware loaded?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf * @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
* @ctrl: Local copy of the control register/word. * @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers * @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts * @rx_offset: Offset in the RX buffers where packet data starts
* @xdp_prog: Installed XDP program
* @cpp: Pointer to the CPP handle * @cpp: Pointer to the CPP handle
* @nfp_dev_cpp: Pointer to the NFP Device handle * @nfp_dev_cpp: Pointer to the NFP Device handle
* @ctrl_area: Pointer to the CPP area for the control BAR * @ctrl_area: Pointer to the CPP area for the control BAR
@@ -451,6 +459,7 @@ struct nfp_stat_pair {
* @max_tx_rings: Maximum number of TX rings supported by the Firmware * @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware * @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings * @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings * @num_rx_rings: Currently configured number of RX rings
* @txd_cnt: Size of the TX ring in number of descriptors * @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors * @rxd_cnt: Size of the RX ring in number of descriptors
@@ -494,12 +503,15 @@ struct nfp_net {
unsigned is_vf:1; unsigned is_vf:1;
unsigned fw_loaded:1; unsigned fw_loaded:1;
unsigned bpf_offload_skip_sw:1; unsigned bpf_offload_skip_sw:1;
unsigned bpf_offload_xdp:1;
u32 ctrl; u32 ctrl;
u32 fl_bufsz; u32 fl_bufsz;
u32 rx_offset; u32 rx_offset;
struct bpf_prog *xdp_prog;
struct nfp_net_tx_ring *tx_rings; struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings; struct nfp_net_rx_ring *rx_rings;
@@ -532,6 +544,7 @@ struct nfp_net {
unsigned int max_rx_rings; unsigned int max_rx_rings;
unsigned int num_tx_rings; unsigned int num_tx_rings;
unsigned int num_stack_tx_rings;
unsigned int num_rx_rings; unsigned int num_rx_rings;
int stride_tx; int stride_tx;
@@ -583,6 +596,13 @@ struct nfp_net {
struct dentry *debugfs_dir; struct dentry *debugfs_dir;
}; };
struct nfp_net_ring_set {
unsigned int n_rings;
unsigned int mtu;
unsigned int dcnt;
void *rings;
};
/* Functions to read/write from/to a BAR /* Functions to read/write from/to a BAR
* Performs any endian conversion necessary. * Performs any endian conversion necessary.
*/ */
@@ -771,7 +791,9 @@ void nfp_net_rss_write_key(struct nfp_net *nn);
void nfp_net_coalesce_write_cfg(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn);
int nfp_net_irqs_alloc(struct nfp_net *nn); int nfp_net_irqs_alloc(struct nfp_net *nn);
void nfp_net_irqs_disable(struct nfp_net *nn); void nfp_net_irqs_disable(struct nfp_net *nn);
int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt); int
nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx);
#ifdef CONFIG_NFP_NET_DEBUG #ifdef CONFIG_NFP_NET_DEBUG
void nfp_net_debugfs_create(void); void nfp_net_debugfs_create(void);
@@ -797,8 +819,6 @@ static inline void nfp_net_debugfs_adapter_del(struct nfp_net *nn)
#endif /* CONFIG_NFP_NET_DEBUG */ #endif /* CONFIG_NFP_NET_DEBUG */
void nfp_net_filter_stats_timer(unsigned long data); void nfp_net_filter_stats_timer(unsigned long data);
int int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf);
nfp_net_bpf_offload(struct nfp_net *nn, u32 handle, __be16 proto,
struct tc_cls_bpf_offload *cls_bpf);
#endif /* _NFP_NET_H_ */ #endif /* _NFP_NET_H_ */
@@ -41,6 +41,7 @@
* Chris Telfer <chris.telfer@netronome.com> * Chris Telfer <chris.telfer@netronome.com>
*/ */
#include <linux/bpf.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/init.h> #include <linux/init.h>
@@ -490,11 +491,12 @@ static void nfp_net_irqs_assign(struct net_device *netdev)
nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings); nn->num_rx_rings = min(nn->num_r_vecs, nn->num_rx_rings);
nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings); nn->num_tx_rings = min(nn->num_r_vecs, nn->num_tx_rings);
nn->num_stack_tx_rings = nn->num_tx_rings;
nn->lsc_handler = nfp_net_irq_lsc; nn->lsc_handler = nfp_net_irq_lsc;
nn->exn_handler = nfp_net_irq_exn; nn->exn_handler = nfp_net_irq_exn;
for (r = 0; r < nn->num_r_vecs; r++) { for (r = 0; r < nn->max_r_vecs; r++) {
r_vec = &nn->r_vecs[r]; r_vec = &nn->r_vecs[r];
r_vec->nfp_net = nn; r_vec->nfp_net = nn;
r_vec->handler = nfp_net_irq_rxtx; r_vec->handler = nfp_net_irq_rxtx;
@@ -713,6 +715,13 @@ static void nfp_net_tx_csum(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
u64_stats_update_end(&r_vec->tx_sync); u64_stats_update_end(&r_vec->tx_sync);
} }
static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
{
wmb();
nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
tx_ring->wr_ptr_add = 0;
}
/** /**
* nfp_net_tx() - Main transmit entry point * nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit * @skb: SKB to transmit
@@ -827,12 +836,8 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
nfp_net_tx_ring_stop(nd_q, tx_ring); nfp_net_tx_ring_stop(nd_q, tx_ring);
tx_ring->wr_ptr_add += nr_frags + 1; tx_ring->wr_ptr_add += nr_frags + 1;
if (!skb->xmit_more || netif_xmit_stopped(nd_q)) { if (!skb->xmit_more || netif_xmit_stopped(nd_q))
/* force memory write before we let HW know */ nfp_net_tx_xmit_more_flush(tx_ring);
wmb();
nfp_qcp_wr_ptr_add(tx_ring->qcp_q, tx_ring->wr_ptr_add);
tx_ring->wr_ptr_add = 0;
}
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
@@ -954,6 +959,56 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt); tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
} }
static void nfp_net_xdp_complete(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net;
u32 done_pkts = 0, done_bytes = 0;
int idx, todo;
u32 qcp_rd_p;
/* Work out how many descriptors have been transmitted */
qcp_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
if (qcp_rd_p == tx_ring->qcp_rd_p)
return;
if (qcp_rd_p > tx_ring->qcp_rd_p)
todo = qcp_rd_p - tx_ring->qcp_rd_p;
else
todo = qcp_rd_p + tx_ring->cnt - tx_ring->qcp_rd_p;
while (todo--) {
idx = tx_ring->rd_p & (tx_ring->cnt - 1);
tx_ring->rd_p++;
if (!tx_ring->txbufs[idx].frag)
continue;
nfp_net_dma_unmap_rx(nn, tx_ring->txbufs[idx].dma_addr,
nn->fl_bufsz, DMA_BIDIRECTIONAL);
__free_page(virt_to_page(tx_ring->txbufs[idx].frag));
done_pkts++;
done_bytes += tx_ring->txbufs[idx].real_len;
tx_ring->txbufs[idx].dma_addr = 0;
tx_ring->txbufs[idx].frag = NULL;
tx_ring->txbufs[idx].fidx = -2;
}
tx_ring->qcp_rd_p = qcp_rd_p;
u64_stats_update_begin(&r_vec->tx_sync);
r_vec->tx_bytes += done_bytes;
r_vec->tx_pkts += done_pkts;
u64_stats_update_end(&r_vec->tx_sync);
WARN_ONCE(tx_ring->wr_p - tx_ring->rd_p > tx_ring->cnt,
"TX ring corruption rd_p=%u wr_p=%u cnt=%u\n",
tx_ring->rd_p, tx_ring->wr_p, tx_ring->cnt);
}
/** /**
* nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers * nfp_net_tx_ring_reset() - Free any untransmitted buffers and reset pointers
* @nn: NFP Net device * @nn: NFP Net device
@@ -964,39 +1019,47 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring)
static void static void
nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring) nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
{ {
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
const struct skb_frag_struct *frag; const struct skb_frag_struct *frag;
struct netdev_queue *nd_q;
struct pci_dev *pdev = nn->pdev; struct pci_dev *pdev = nn->pdev;
struct netdev_queue *nd_q;
while (tx_ring->rd_p != tx_ring->wr_p) { while (tx_ring->rd_p != tx_ring->wr_p) {
int nr_frags, fidx, idx; struct nfp_net_tx_buf *tx_buf;
struct sk_buff *skb; int idx;
idx = tx_ring->rd_p & (tx_ring->cnt - 1); idx = tx_ring->rd_p & (tx_ring->cnt - 1);
skb = tx_ring->txbufs[idx].skb; tx_buf = &tx_ring->txbufs[idx];
nr_frags = skb_shinfo(skb)->nr_frags;
fidx = tx_ring->txbufs[idx].fidx;
if (fidx == -1) { if (tx_ring == r_vec->xdp_ring) {
/* unmap head */ nfp_net_dma_unmap_rx(nn, tx_buf->dma_addr,
dma_unmap_single(&pdev->dev, nn->fl_bufsz, DMA_BIDIRECTIONAL);
tx_ring->txbufs[idx].dma_addr, __free_page(virt_to_page(tx_ring->txbufs[idx].frag));
skb_headlen(skb), DMA_TO_DEVICE);
} else { } else {
/* unmap fragment */ struct sk_buff *skb = tx_ring->txbufs[idx].skb;
frag = &skb_shinfo(skb)->frags[fidx]; int nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_page(&pdev->dev,
tx_ring->txbufs[idx].dma_addr, if (tx_buf->fidx == -1) {
skb_frag_size(frag), DMA_TO_DEVICE); /* unmap head */
} dma_unmap_single(&pdev->dev, tx_buf->dma_addr,
skb_headlen(skb),
DMA_TO_DEVICE);
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
dma_unmap_page(&pdev->dev, tx_buf->dma_addr,
skb_frag_size(frag),
DMA_TO_DEVICE);
}
/* check for last gather fragment */ /* check for last gather fragment */
if (fidx == nr_frags - 1) if (tx_buf->fidx == nr_frags - 1)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
}
tx_ring->txbufs[idx].dma_addr = 0; tx_buf->dma_addr = 0;
tx_ring->txbufs[idx].skb = NULL; tx_buf->skb = NULL;
tx_ring->txbufs[idx].fidx = -2; tx_buf->fidx = -2;
tx_ring->qcp_rd_p++; tx_ring->qcp_rd_p++;
tx_ring->rd_p++; tx_ring->rd_p++;
@@ -1008,6 +1071,9 @@ nfp_net_tx_ring_reset(struct nfp_net *nn, struct nfp_net_tx_ring *tx_ring)
tx_ring->qcp_rd_p = 0; tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0; tx_ring->wr_ptr_add = 0;
if (tx_ring == r_vec->xdp_ring)
return;
nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx); nd_q = netdev_get_tx_queue(nn->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q); netdev_tx_reset_queue(nd_q);
} }
@@ -1017,7 +1083,7 @@ static void nfp_net_tx_timeout(struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
int i; int i;
for (i = 0; i < nn->num_tx_rings; i++) { for (i = 0; i < nn->netdev->real_num_tx_queues; i++) {
if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i))) if (!netif_tx_queue_stopped(netdev_get_tx_queue(netdev, i)))
continue; continue;
nn_warn(nn, "TX timeout on ring: %d\n", i); nn_warn(nn, "TX timeout on ring: %d\n", i);
@@ -1045,11 +1111,21 @@ nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
return fl_bufsz; return fl_bufsz;
} }
static void
nfp_net_free_frag(void *frag, bool xdp)
{
if (!xdp)
skb_free_frag(frag);
else
__free_page(virt_to_page(frag));
}
/** /**
* nfp_net_rx_alloc_one() - Allocate and map page frag for RX * nfp_net_rx_alloc_one() - Allocate and map page frag for RX
* @rx_ring: RX ring structure of the skb * @rx_ring: RX ring structure of the skb
* @dma_addr: Pointer to storage for DMA address (output param) * @dma_addr: Pointer to storage for DMA address (output param)
* @fl_bufsz: size of freelist buffers * @fl_bufsz: size of freelist buffers
* @xdp: Whether XDP is enabled
* *
* This function will allcate a new page frag, map it for DMA. * This function will allcate a new page frag, map it for DMA.
* *
@@ -1057,20 +1133,26 @@ nfp_net_calc_fl_bufsz(struct nfp_net *nn, unsigned int mtu)
*/ */
static void * static void *
nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr, nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
unsigned int fl_bufsz) unsigned int fl_bufsz, bool xdp)
{ {
struct nfp_net *nn = rx_ring->r_vec->nfp_net; struct nfp_net *nn = rx_ring->r_vec->nfp_net;
int direction;
void *frag; void *frag;
frag = netdev_alloc_frag(fl_bufsz); if (!xdp)
frag = netdev_alloc_frag(fl_bufsz);
else
frag = page_address(alloc_page(GFP_KERNEL | __GFP_COLD));
if (!frag) { if (!frag) {
nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
return NULL; return NULL;
} }
*dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, DMA_FROM_DEVICE); direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
*dma_addr = nfp_net_dma_map_rx(nn, frag, fl_bufsz, direction);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
skb_free_frag(frag); nfp_net_free_frag(frag, xdp);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL; return NULL;
} }
@@ -1078,19 +1160,23 @@ nfp_net_rx_alloc_one(struct nfp_net_rx_ring *rx_ring, dma_addr_t *dma_addr,
return frag; return frag;
} }
static void *nfp_net_napi_alloc_one(struct nfp_net *nn, dma_addr_t *dma_addr) static void *
nfp_net_napi_alloc_one(struct nfp_net *nn, int direction, dma_addr_t *dma_addr)
{ {
void *frag; void *frag;
frag = napi_alloc_frag(nn->fl_bufsz); if (!nn->xdp_prog)
frag = napi_alloc_frag(nn->fl_bufsz);
else
frag = page_address(alloc_page(GFP_ATOMIC | __GFP_COLD));
if (!frag) { if (!frag) {
nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n"); nn_warn_ratelimit(nn, "Failed to alloc receive page frag\n");
return NULL; return NULL;
} }
*dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, DMA_FROM_DEVICE); *dma_addr = nfp_net_dma_map_rx(nn, frag, nn->fl_bufsz, direction);
if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) { if (dma_mapping_error(&nn->pdev->dev, *dma_addr)) {
skb_free_frag(frag); nfp_net_free_frag(frag, nn->xdp_prog);
nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n"); nn_warn_ratelimit(nn, "Failed to map DMA RX buffer\n");
return NULL; return NULL;
} }
@@ -1161,14 +1247,17 @@ static void nfp_net_rx_ring_reset(struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring * nfp_net_rx_ring_bufs_free() - Free any buffers currently on the RX ring
* @nn: NFP Net device * @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from * @rx_ring: RX ring to remove buffers from
* @xdp: Whether XDP is enabled
* *
* Assumes that the device is stopped and buffers are in [0, ring->cnt - 1) * Assumes that the device is stopped and buffers are in [0, ring->cnt - 1)
* entries. After device is disabled nfp_net_rx_ring_reset() must be called * entries. After device is disabled nfp_net_rx_ring_reset() must be called
* to restore required ring geometry. * to restore required ring geometry.
*/ */
static void static void
nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
bool xdp)
{ {
int direction = xdp ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
unsigned int i; unsigned int i;
for (i = 0; i < rx_ring->cnt - 1; i++) { for (i = 0; i < rx_ring->cnt - 1; i++) {
@@ -1180,8 +1269,8 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
continue; continue;
nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr, nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[i].dma_addr,
rx_ring->bufsz, DMA_FROM_DEVICE); rx_ring->bufsz, direction);
skb_free_frag(rx_ring->rxbufs[i].frag); nfp_net_free_frag(rx_ring->rxbufs[i].frag, xdp);
rx_ring->rxbufs[i].dma_addr = 0; rx_ring->rxbufs[i].dma_addr = 0;
rx_ring->rxbufs[i].frag = NULL; rx_ring->rxbufs[i].frag = NULL;
} }
@@ -1191,9 +1280,11 @@ nfp_net_rx_ring_bufs_free(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
* nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW) * nfp_net_rx_ring_bufs_alloc() - Fill RX ring with buffers (don't give to FW)
* @nn: NFP Net device * @nn: NFP Net device
* @rx_ring: RX ring to remove buffers from * @rx_ring: RX ring to remove buffers from
* @xdp: Whether XDP is enabled
*/ */
static int static int
nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring) nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
bool xdp)
{ {
struct nfp_net_rx_buf *rxbufs; struct nfp_net_rx_buf *rxbufs;
unsigned int i; unsigned int i;
@@ -1203,9 +1294,9 @@ nfp_net_rx_ring_bufs_alloc(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring)
for (i = 0; i < rx_ring->cnt - 1; i++) { for (i = 0; i < rx_ring->cnt - 1; i++) {
rxbufs[i].frag = rxbufs[i].frag =
nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr, nfp_net_rx_alloc_one(rx_ring, &rxbufs[i].dma_addr,
rx_ring->bufsz); rx_ring->bufsz, xdp);
if (!rxbufs[i].frag) { if (!rxbufs[i].frag) {
nfp_net_rx_ring_bufs_free(nn, rx_ring); nfp_net_rx_ring_bufs_free(nn, rx_ring, xdp);
return -ENOMEM; return -ENOMEM;
} }
} }
@@ -1368,6 +1459,68 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
static void
nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
unsigned int pkt_len)
{
struct nfp_net_tx_buf *txbuf;
struct nfp_net_tx_desc *txd;
dma_addr_t new_dma_addr;
void *new_frag;
int wr_idx;
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
return;
}
new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
return;
}
nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
wr_idx = tx_ring->wr_p & (tx_ring->cnt - 1);
/* Stash the soft descriptor of the head then initialize it */
txbuf = &tx_ring->txbufs[wr_idx];
txbuf->frag = rxbuf->frag;
txbuf->dma_addr = rxbuf->dma_addr;
txbuf->fidx = -1;
txbuf->pkt_cnt = 1;
txbuf->real_len = pkt_len;
dma_sync_single_for_device(&nn->pdev->dev, rxbuf->dma_addr + pkt_off,
pkt_len, DMA_TO_DEVICE);
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = PCIE_DESC_TX_EOP;
txd->dma_len = cpu_to_le16(pkt_len);
nfp_desc_set_dma_addr(txd, rxbuf->dma_addr + pkt_off);
txd->data_len = cpu_to_le16(pkt_len);
txd->flags = 0;
txd->mss = 0;
txd->l4_offset = 0;
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
}
static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
{
struct xdp_buff xdp;
xdp.data = data;
xdp.data_end = data + len;
return BPF_PROG_RUN(prog, (void *)&xdp);
}
/** /**
* nfp_net_rx() - receive up to @budget packets on @rx_ring * nfp_net_rx() - receive up to @budget packets on @rx_ring
* @rx_ring: RX ring to receive from * @rx_ring: RX ring to receive from
@@ -1383,16 +1536,27 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
{ {
struct nfp_net_r_vector *r_vec = rx_ring->r_vec; struct nfp_net_r_vector *r_vec = rx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net; struct nfp_net *nn = r_vec->nfp_net;
unsigned int data_len, meta_len; struct nfp_net_tx_ring *tx_ring;
struct nfp_net_rx_buf *rxbuf; struct bpf_prog *xdp_prog;
struct nfp_net_rx_desc *rxd; unsigned int true_bufsz;
dma_addr_t new_dma_addr;
struct sk_buff *skb; struct sk_buff *skb;
int pkts_polled = 0; int pkts_polled = 0;
void *new_frag; int rx_dma_map_dir;
int idx; int idx;
rcu_read_lock();
xdp_prog = READ_ONCE(nn->xdp_prog);
rx_dma_map_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
true_bufsz = xdp_prog ? PAGE_SIZE : nn->fl_bufsz;
tx_ring = r_vec->xdp_ring;
while (pkts_polled < budget) { while (pkts_polled < budget) {
unsigned int meta_len, data_len, data_off, pkt_len, pkt_off;
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
dma_addr_t new_dma_addr;
void *new_frag;
idx = rx_ring->rd_p & (rx_ring->cnt - 1); idx = rx_ring->rd_p & (rx_ring->cnt - 1);
rxd = &rx_ring->rxds[idx]; rxd = &rx_ring->rxds[idx];
@@ -1408,22 +1572,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
pkts_polled++; pkts_polled++;
rxbuf = &rx_ring->rxbufs[idx]; rxbuf = &rx_ring->rxbufs[idx];
skb = build_skb(rxbuf->frag, nn->fl_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
continue;
}
new_frag = nfp_net_napi_alloc_one(nn, &new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
continue;
}
nfp_net_dma_unmap_rx(nn, rx_ring->rxbufs[idx].dma_addr,
nn->fl_bufsz, DMA_FROM_DEVICE);
nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
/* < meta_len > /* < meta_len >
* <-- [rx_offset] --> * <-- [rx_offset] -->
* --------------------------------------------------------- * ---------------------------------------------------------
@@ -1438,20 +1586,66 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
*/ */
meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK; meta_len = rxd->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK;
data_len = le16_to_cpu(rxd->rxd.data_len); data_len = le16_to_cpu(rxd->rxd.data_len);
pkt_len = data_len - meta_len;
if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC) if (nn->rx_offset == NFP_NET_CFG_RX_OFFSET_DYNAMIC)
skb_reserve(skb, NFP_NET_RX_BUF_HEADROOM + meta_len); pkt_off = meta_len;
else else
skb_reserve(skb, pkt_off = nn->rx_offset;
NFP_NET_RX_BUF_HEADROOM + nn->rx_offset); data_off = NFP_NET_RX_BUF_HEADROOM + pkt_off;
skb_put(skb, data_len - meta_len);
/* Stats update */ /* Stats update */
u64_stats_update_begin(&r_vec->rx_sync); u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++; r_vec->rx_pkts++;
r_vec->rx_bytes += skb->len; r_vec->rx_bytes += pkt_len;
u64_stats_update_end(&r_vec->rx_sync); u64_stats_update_end(&r_vec->rx_sync);
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
nn->bpf_offload_xdp)) {
int act;
dma_sync_single_for_cpu(&nn->pdev->dev,
rxbuf->dma_addr + pkt_off,
pkt_len, DMA_FROM_DEVICE);
act = nfp_net_run_xdp(xdp_prog, rxbuf->frag + data_off,
pkt_len);
switch (act) {
case XDP_PASS:
break;
case XDP_TX:
nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
pkt_off, pkt_len);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
case XDP_DROP:
nfp_net_rx_give_one(rx_ring, rxbuf->frag,
rxbuf->dma_addr);
continue;
}
}
skb = build_skb(rxbuf->frag, true_bufsz);
if (unlikely(!skb)) {
nfp_net_rx_drop(r_vec, rx_ring, rxbuf, NULL);
continue;
}
new_frag = nfp_net_napi_alloc_one(nn, rx_dma_map_dir,
&new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(r_vec, rx_ring, rxbuf, skb);
continue;
}
nfp_net_dma_unmap_rx(nn, rxbuf->dma_addr, nn->fl_bufsz,
rx_dma_map_dir);
nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
skb_reserve(skb, data_off);
skb_put(skb, pkt_len);
if (nn->fw_ver.major <= 3) { if (nn->fw_ver.major <= 3) {
nfp_net_set_hash_desc(nn->netdev, skb, rxd); nfp_net_set_hash_desc(nn->netdev, skb, rxd);
} else if (meta_len) { } else if (meta_len) {
@@ -1477,6 +1671,10 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
napi_gro_receive(&rx_ring->r_vec->napi, skb); napi_gro_receive(&rx_ring->r_vec->napi, skb);
} }
if (xdp_prog && tx_ring->wr_ptr_add)
nfp_net_tx_xmit_more_flush(tx_ring);
rcu_read_unlock();
return pkts_polled; return pkts_polled;
} }
@@ -1495,8 +1693,11 @@ static int nfp_net_poll(struct napi_struct *napi, int budget)
if (r_vec->tx_ring) if (r_vec->tx_ring)
nfp_net_tx_complete(r_vec->tx_ring); nfp_net_tx_complete(r_vec->tx_ring);
if (r_vec->rx_ring) if (r_vec->rx_ring) {
pkts_polled = nfp_net_rx(r_vec->rx_ring, budget); pkts_polled = nfp_net_rx(r_vec->rx_ring, budget);
if (r_vec->xdp_ring)
nfp_net_xdp_complete(r_vec->xdp_ring);
}
if (pkts_polled < budget) { if (pkts_polled < budget) {
napi_complete_done(napi, pkts_polled); napi_complete_done(napi, pkts_polled);
@@ -1536,10 +1737,12 @@ static void nfp_net_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
* nfp_net_tx_ring_alloc() - Allocate resource for a TX ring * nfp_net_tx_ring_alloc() - Allocate resource for a TX ring
* @tx_ring: TX Ring structure to allocate * @tx_ring: TX Ring structure to allocate
* @cnt: Ring buffer count * @cnt: Ring buffer count
* @is_xdp: True if ring will be used for XDP
* *
* Return: 0 on success, negative errno otherwise. * Return: 0 on success, negative errno otherwise.
*/ */
static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt) static int
nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt, bool is_xdp)
{ {
struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net *nn = r_vec->nfp_net; struct nfp_net *nn = r_vec->nfp_net;
@@ -1559,11 +1762,14 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
if (!tx_ring->txbufs) if (!tx_ring->txbufs)
goto err_alloc; goto err_alloc;
netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask, tx_ring->idx); if (!is_xdp)
netif_set_xps_queue(nn->netdev, &r_vec->affinity_mask,
tx_ring->idx);
nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p\n", nn_dbg(nn, "TxQ%02d: QCidx=%02d cnt=%d dma=%#llx host=%p %s\n",
tx_ring->idx, tx_ring->qcidx, tx_ring->idx, tx_ring->qcidx,
tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds); tx_ring->cnt, (unsigned long long)tx_ring->dma, tx_ring->txds,
is_xdp ? "XDP" : "");
return 0; return 0;
@@ -1573,23 +1779,29 @@ static int nfp_net_tx_ring_alloc(struct nfp_net_tx_ring *tx_ring, u32 cnt)
} }
static struct nfp_net_tx_ring * static struct nfp_net_tx_ring *
nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt) nfp_net_tx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
unsigned int num_stack_tx_rings)
{ {
struct nfp_net_tx_ring *rings; struct nfp_net_tx_ring *rings;
unsigned int r; unsigned int r;
rings = kcalloc(nn->num_tx_rings, sizeof(*rings), GFP_KERNEL); rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings) if (!rings)
return NULL; return NULL;
for (r = 0; r < nn->num_tx_rings; r++) { for (r = 0; r < s->n_rings; r++) {
nfp_net_tx_ring_init(&rings[r], nn->tx_rings[r].r_vec, r); int bias = 0;
if (r >= num_stack_tx_rings)
bias = num_stack_tx_rings;
nfp_net_tx_ring_init(&rings[r], &nn->r_vecs[r - bias], r);
if (nfp_net_tx_ring_alloc(&rings[r], buf_cnt)) if (nfp_net_tx_ring_alloc(&rings[r], s->dcnt, bias))
goto err_free_prev; goto err_free_prev;
} }
return rings; return s->rings = rings;
err_free_prev: err_free_prev:
while (r--) while (r--)
@@ -1598,28 +1810,27 @@ nfp_net_shadow_tx_rings_prepare(struct nfp_net *nn, u32 buf_cnt)
return NULL; return NULL;
} }
static struct nfp_net_tx_ring * static void
nfp_net_shadow_tx_rings_swap(struct nfp_net *nn, struct nfp_net_tx_ring *rings) nfp_net_tx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{ {
struct nfp_net_tx_ring *old = nn->tx_rings; struct nfp_net_ring_set new = *s;
unsigned int r;
for (r = 0; r < nn->num_tx_rings; r++) s->dcnt = nn->txd_cnt;
old[r].r_vec->tx_ring = &rings[r]; s->rings = nn->tx_rings;
s->n_rings = nn->num_tx_rings;
nn->tx_rings = rings; nn->txd_cnt = new.dcnt;
return old; nn->tx_rings = new.rings;
nn->num_tx_rings = new.n_rings;
} }
static void static void
nfp_net_shadow_tx_rings_free(struct nfp_net *nn, struct nfp_net_tx_ring *rings) nfp_net_tx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s)
{ {
struct nfp_net_tx_ring *rings = s->rings;
unsigned int r; unsigned int r;
if (!rings) for (r = 0; r < s->n_rings; r++)
return;
for (r = 0; r < nn->num_tx_rings; r++)
nfp_net_tx_ring_free(&rings[r]); nfp_net_tx_ring_free(&rings[r]);
kfree(rings); kfree(rings);
@@ -1691,31 +1902,32 @@ nfp_net_rx_ring_alloc(struct nfp_net_rx_ring *rx_ring, unsigned int fl_bufsz,
} }
static struct nfp_net_rx_ring * static struct nfp_net_rx_ring *
nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz, nfp_net_rx_ring_set_prepare(struct nfp_net *nn, struct nfp_net_ring_set *s,
u32 buf_cnt) bool xdp)
{ {
unsigned int fl_bufsz = nfp_net_calc_fl_bufsz(nn, s->mtu);
struct nfp_net_rx_ring *rings; struct nfp_net_rx_ring *rings;
unsigned int r; unsigned int r;
rings = kcalloc(nn->num_rx_rings, sizeof(*rings), GFP_KERNEL); rings = kcalloc(s->n_rings, sizeof(*rings), GFP_KERNEL);
if (!rings) if (!rings)
return NULL; return NULL;
for (r = 0; r < nn->num_rx_rings; r++) { for (r = 0; r < s->n_rings; r++) {
nfp_net_rx_ring_init(&rings[r], nn->rx_rings[r].r_vec, r); nfp_net_rx_ring_init(&rings[r], &nn->r_vecs[r], r);
if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, buf_cnt)) if (nfp_net_rx_ring_alloc(&rings[r], fl_bufsz, s->dcnt))
goto err_free_prev; goto err_free_prev;
if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r])) if (nfp_net_rx_ring_bufs_alloc(nn, &rings[r], xdp))
goto err_free_ring; goto err_free_ring;
} }
return rings; return s->rings = rings;
err_free_prev: err_free_prev:
while (r--) { while (r--) {
nfp_net_rx_ring_bufs_free(nn, &rings[r]); nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
err_free_ring: err_free_ring:
nfp_net_rx_ring_free(&rings[r]); nfp_net_rx_ring_free(&rings[r]);
} }
@@ -1723,35 +1935,50 @@ nfp_net_shadow_rx_rings_prepare(struct nfp_net *nn, unsigned int fl_bufsz,
return NULL; return NULL;
} }
static struct nfp_net_rx_ring * static void
nfp_net_shadow_rx_rings_swap(struct nfp_net *nn, struct nfp_net_rx_ring *rings) nfp_net_rx_ring_set_swap(struct nfp_net *nn, struct nfp_net_ring_set *s)
{ {
struct nfp_net_rx_ring *old = nn->rx_rings; struct nfp_net_ring_set new = *s;
unsigned int r;
for (r = 0; r < nn->num_rx_rings; r++) s->mtu = nn->netdev->mtu;
old[r].r_vec->rx_ring = &rings[r]; s->dcnt = nn->rxd_cnt;
s->rings = nn->rx_rings;
s->n_rings = nn->num_rx_rings;
nn->rx_rings = rings; nn->netdev->mtu = new.mtu;
return old; nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, new.mtu);
nn->rxd_cnt = new.dcnt;
nn->rx_rings = new.rings;
nn->num_rx_rings = new.n_rings;
} }
static void static void
nfp_net_shadow_rx_rings_free(struct nfp_net *nn, struct nfp_net_rx_ring *rings) nfp_net_rx_ring_set_free(struct nfp_net *nn, struct nfp_net_ring_set *s,
bool xdp)
{ {
struct nfp_net_rx_ring *rings = s->rings;
unsigned int r; unsigned int r;
if (!rings) for (r = 0; r < s->n_rings; r++) {
return; nfp_net_rx_ring_bufs_free(nn, &rings[r], xdp);
for (r = 0; r < nn->num_rx_rings; r++) {
nfp_net_rx_ring_bufs_free(nn, &rings[r]);
nfp_net_rx_ring_free(&rings[r]); nfp_net_rx_ring_free(&rings[r]);
} }
kfree(rings); kfree(rings);
} }
static void
nfp_net_vector_assign_rings(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx)
{
r_vec->rx_ring = idx < nn->num_rx_rings ? &nn->rx_rings[idx] : NULL;
r_vec->tx_ring =
idx < nn->num_stack_tx_rings ? &nn->tx_rings[idx] : NULL;
r_vec->xdp_ring = idx < nn->num_tx_rings - nn->num_stack_tx_rings ?
&nn->tx_rings[nn->num_stack_tx_rings + idx] : NULL;
}
static int static int
nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec, nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
int idx) int idx)
@@ -1759,33 +1986,20 @@ nfp_net_prepare_vector(struct nfp_net *nn, struct nfp_net_r_vector *r_vec,
struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx]; struct msix_entry *entry = &nn->irq_entries[r_vec->irq_idx];
int err; int err;
if (idx < nn->num_tx_rings) { /* Setup NAPI */
r_vec->tx_ring = &nn->tx_rings[idx]; netif_napi_add(nn->netdev, &r_vec->napi,
nfp_net_tx_ring_init(r_vec->tx_ring, r_vec, idx); nfp_net_poll, NAPI_POLL_WEIGHT);
} else {
r_vec->tx_ring = NULL;
}
if (idx < nn->num_rx_rings) {
r_vec->rx_ring = &nn->rx_rings[idx];
nfp_net_rx_ring_init(r_vec->rx_ring, r_vec, idx);
} else {
r_vec->rx_ring = NULL;
}
snprintf(r_vec->name, sizeof(r_vec->name), snprintf(r_vec->name, sizeof(r_vec->name),
"%s-rxtx-%d", nn->netdev->name, idx); "%s-rxtx-%d", nn->netdev->name, idx);
err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec); err = request_irq(entry->vector, r_vec->handler, 0, r_vec->name, r_vec);
if (err) { if (err) {
netif_napi_del(&r_vec->napi);
nn_err(nn, "Error requesting IRQ %d\n", entry->vector); nn_err(nn, "Error requesting IRQ %d\n", entry->vector);
return err; return err;
} }
disable_irq(entry->vector); disable_irq(entry->vector);
/* Setup NAPI */
netif_napi_add(nn->netdev, &r_vec->napi,
nfp_net_poll, NAPI_POLL_WEIGHT);
irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask); irq_set_affinity_hint(entry->vector, &r_vec->affinity_mask);
nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry); nn_dbg(nn, "RV%02d: irq=%03d/%03d\n", idx, entry->vector, entry->entry);
@@ -1913,9 +2127,9 @@ static void nfp_net_clear_config_and_disable(struct nfp_net *nn)
nn_err(nn, "Could not disable device: %d\n", err); nn_err(nn, "Could not disable device: %d\n", err);
for (r = 0; r < nn->num_rx_rings; r++) for (r = 0; r < nn->num_rx_rings; r++)
nfp_net_rx_ring_reset(nn->r_vecs[r].rx_ring); nfp_net_rx_ring_reset(&nn->rx_rings[r]);
for (r = 0; r < nn->num_tx_rings; r++) for (r = 0; r < nn->num_tx_rings; r++)
nfp_net_tx_ring_reset(nn, nn->r_vecs[r].tx_ring); nfp_net_tx_ring_reset(nn, &nn->tx_rings[r]);
for (r = 0; r < nn->num_r_vecs; r++) for (r = 0; r < nn->num_r_vecs; r++)
nfp_net_vec_clear_ring_data(nn, r); nfp_net_vec_clear_ring_data(nn, r);
@@ -1993,7 +2207,7 @@ static int __nfp_net_set_config_and_enable(struct nfp_net *nn)
nn->ctrl = new_ctrl; nn->ctrl = new_ctrl;
for (r = 0; r < nn->num_rx_rings; r++) for (r = 0; r < nn->num_rx_rings; r++)
nfp_net_rx_ring_fill_freelist(nn->r_vecs[r].rx_ring); nfp_net_rx_ring_fill_freelist(&nn->rx_rings[r]);
/* Since reconfiguration requests while NFP is down are ignored we /* Since reconfiguration requests while NFP is down are ignored we
* have to wipe the entire VXLAN configuration and reinitialize it. * have to wipe the entire VXLAN configuration and reinitialize it.
@@ -2044,6 +2258,15 @@ static void nfp_net_open_stack(struct nfp_net *nn)
static int nfp_net_netdev_open(struct net_device *netdev) static int nfp_net_netdev_open(struct net_device *netdev)
{ {
struct nfp_net *nn = netdev_priv(netdev); struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_ring_set rx = {
.n_rings = nn->num_rx_rings,
.mtu = nn->netdev->mtu,
.dcnt = nn->rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = nn->num_tx_rings,
.dcnt = nn->txd_cnt,
};
int err, r; int err, r;
if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) { if (nn->ctrl & NFP_NET_CFG_CTRL_ENABLE) {
@@ -2068,41 +2291,29 @@ static int nfp_net_netdev_open(struct net_device *netdev)
goto err_free_exn; goto err_free_exn;
disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector);
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
if (!nn->rx_rings) {
err = -ENOMEM;
goto err_free_lsc;
}
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
if (!nn->tx_rings) {
err = -ENOMEM;
goto err_free_rx_rings;
}
for (r = 0; r < nn->num_r_vecs; r++) { for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r); err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
if (err) if (err)
goto err_cleanup_vec_p; goto err_cleanup_vec_p;
} }
for (r = 0; r < nn->num_tx_rings; r++) {
err = nfp_net_tx_ring_alloc(nn->r_vecs[r].tx_ring, nn->txd_cnt); nn->rx_rings = nfp_net_rx_ring_set_prepare(nn, &rx, nn->xdp_prog);
if (err) if (!nn->rx_rings) {
goto err_free_tx_ring_p; err = -ENOMEM;
goto err_cleanup_vec;
} }
for (r = 0; r < nn->num_rx_rings; r++) {
err = nfp_net_rx_ring_alloc(nn->r_vecs[r].rx_ring,
nn->fl_bufsz, nn->rxd_cnt);
if (err)
goto err_flush_free_rx_ring_p;
err = nfp_net_rx_ring_bufs_alloc(nn, nn->r_vecs[r].rx_ring); nn->tx_rings = nfp_net_tx_ring_set_prepare(nn, &tx,
if (err) nn->num_stack_tx_rings);
goto err_free_rx_ring_p; if (!nn->tx_rings) {
err = -ENOMEM;
goto err_free_rx_rings;
} }
err = netif_set_real_num_tx_queues(netdev, nn->num_tx_rings); for (r = 0; r < nn->max_r_vecs; r++)
nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
err = netif_set_real_num_tx_queues(netdev, nn->num_stack_tx_rings);
if (err) if (err)
goto err_free_rings; goto err_free_rings;
@@ -2132,25 +2343,14 @@ static int nfp_net_netdev_open(struct net_device *netdev)
return 0; return 0;
err_free_rings: err_free_rings:
r = nn->num_rx_rings; nfp_net_tx_ring_set_free(nn, &tx);
err_flush_free_rx_ring_p: err_free_rx_rings:
while (r--) { nfp_net_rx_ring_set_free(nn, &rx, nn->xdp_prog);
nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); err_cleanup_vec:
err_free_rx_ring_p:
nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring);
}
r = nn->num_tx_rings;
err_free_tx_ring_p:
while (r--)
nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring);
r = nn->num_r_vecs; r = nn->num_r_vecs;
err_cleanup_vec_p: err_cleanup_vec_p:
while (r--) while (r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
kfree(nn->tx_rings);
err_free_rx_rings:
kfree(nn->rx_rings);
err_free_lsc:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
err_free_exn: err_free_exn:
nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX); nfp_net_aux_irq_free(nn, NFP_NET_CFG_EXN, NFP_NET_IRQ_EXN_IDX);
@@ -2186,11 +2386,11 @@ static void nfp_net_close_free_all(struct nfp_net *nn)
unsigned int r; unsigned int r;
for (r = 0; r < nn->num_rx_rings; r++) { for (r = 0; r < nn->num_rx_rings; r++) {
nfp_net_rx_ring_bufs_free(nn, nn->r_vecs[r].rx_ring); nfp_net_rx_ring_bufs_free(nn, &nn->rx_rings[r], nn->xdp_prog);
nfp_net_rx_ring_free(nn->r_vecs[r].rx_ring); nfp_net_rx_ring_free(&nn->rx_rings[r]);
} }
for (r = 0; r < nn->num_tx_rings; r++) for (r = 0; r < nn->num_tx_rings; r++)
nfp_net_tx_ring_free(nn->r_vecs[r].tx_ring); nfp_net_tx_ring_free(&nn->tx_rings[r]);
for (r = 0; r < nn->num_r_vecs; r++) for (r = 0; r < nn->num_r_vecs; r++)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]); nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
@@ -2255,89 +2455,137 @@ static void nfp_net_set_rx_mode(struct net_device *netdev)
nn->ctrl = new_ctrl; nn->ctrl = new_ctrl;
} }
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu) static void nfp_net_rss_init_itbl(struct nfp_net *nn)
{ {
unsigned int old_mtu, old_fl_bufsz, new_fl_bufsz; int i;
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_rx_ring *tmp_rings;
int err;
old_mtu = netdev->mtu;
old_fl_bufsz = nn->fl_bufsz;
new_fl_bufsz = nfp_net_calc_fl_bufsz(nn, new_mtu);
if (!netif_running(netdev)) {
netdev->mtu = new_mtu;
nn->fl_bufsz = new_fl_bufsz;
return 0;
}
/* Prepare new rings */ for (i = 0; i < sizeof(nn->rss_itbl); i++)
tmp_rings = nfp_net_shadow_rx_rings_prepare(nn, new_fl_bufsz, nn->rss_itbl[i] =
nn->rxd_cnt); ethtool_rxfh_indir_default(i, nn->num_rx_rings);
if (!tmp_rings) }
return -ENOMEM;
/* Stop device, swap in new rings, try to start the firmware */ static int
nfp_net_close_stack(nn); nfp_net_ring_swap_enable(struct nfp_net *nn, unsigned int *num_vecs,
nfp_net_clear_config_and_disable(nn); unsigned int *stack_tx_rings,
struct bpf_prog **xdp_prog,
struct nfp_net_ring_set *rx,
struct nfp_net_ring_set *tx)
{
unsigned int r;
int err;
tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); if (rx)
nfp_net_rx_ring_set_swap(nn, rx);
if (tx)
nfp_net_tx_ring_set_swap(nn, tx);
netdev->mtu = new_mtu; swap(*num_vecs, nn->num_r_vecs);
nn->fl_bufsz = new_fl_bufsz; swap(*stack_tx_rings, nn->num_stack_tx_rings);
*xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
err = nfp_net_set_config_and_enable(nn); for (r = 0; r < nn->max_r_vecs; r++)
if (err) { nfp_net_vector_assign_rings(nn, &nn->r_vecs[r], r);
const int err_new = err;
/* Try with old configuration and old rings */ if (nn->netdev->real_num_rx_queues != nn->num_rx_rings) {
tmp_rings = nfp_net_shadow_rx_rings_swap(nn, tmp_rings); if (!netif_is_rxfh_configured(nn->netdev))
nfp_net_rss_init_itbl(nn);
netdev->mtu = old_mtu; err = netif_set_real_num_rx_queues(nn->netdev,
nn->fl_bufsz = old_fl_bufsz; nn->num_rx_rings);
if (err)
return err;
}
err = __nfp_net_set_config_and_enable(nn); if (nn->netdev->real_num_tx_queues != nn->num_stack_tx_rings) {
err = netif_set_real_num_tx_queues(nn->netdev,
nn->num_stack_tx_rings);
if (err) if (err)
nn_err(nn, "Can't restore MTU - FW communication failed (%d,%d)\n", return err;
err_new, err);
} }
nfp_net_shadow_rx_rings_free(nn, tmp_rings); return __nfp_net_set_config_and_enable(nn);
}
nfp_net_open_stack(nn); static int
nfp_net_check_config(struct nfp_net *nn, struct bpf_prog *xdp_prog,
struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
{
/* XDP-enabled tests */
if (!xdp_prog)
return 0;
if (rx && nfp_net_calc_fl_bufsz(nn, rx->mtu) > PAGE_SIZE) {
nn_warn(nn, "MTU too large w/ XDP enabled\n");
return -EINVAL;
}
if (tx && tx->n_rings > nn->max_tx_rings) {
nn_warn(nn, "Insufficient number of TX rings w/ XDP enabled\n");
return -EINVAL;
}
return err; return 0;
} }
int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt) static void
{ nfp_net_ring_reconfig_down(struct nfp_net *nn, struct bpf_prog **xdp_prog,
struct nfp_net_tx_ring *tx_rings = NULL; struct nfp_net_ring_set *rx,
struct nfp_net_rx_ring *rx_rings = NULL; struct nfp_net_ring_set *tx,
u32 old_rxd_cnt, old_txd_cnt; unsigned int stack_tx_rings, unsigned int num_vecs)
{
nn->netdev->mtu = rx ? rx->mtu : nn->netdev->mtu;
nn->fl_bufsz = nfp_net_calc_fl_bufsz(nn, nn->netdev->mtu);
nn->rxd_cnt = rx ? rx->dcnt : nn->rxd_cnt;
nn->txd_cnt = tx ? tx->dcnt : nn->txd_cnt;
nn->num_rx_rings = rx ? rx->n_rings : nn->num_rx_rings;
nn->num_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
nn->num_stack_tx_rings = stack_tx_rings;
nn->num_r_vecs = num_vecs;
*xdp_prog = xchg(&nn->xdp_prog, *xdp_prog);
if (!netif_is_rxfh_configured(nn->netdev))
nfp_net_rss_init_itbl(nn);
}
int
nfp_net_ring_reconfig(struct nfp_net *nn, struct bpf_prog **xdp_prog,
struct nfp_net_ring_set *rx, struct nfp_net_ring_set *tx)
{
unsigned int stack_tx_rings, num_vecs, r;
int err; int err;
stack_tx_rings = tx ? tx->n_rings : nn->num_tx_rings;
if (*xdp_prog)
stack_tx_rings -= rx ? rx->n_rings : nn->num_rx_rings;
num_vecs = max(rx ? rx->n_rings : nn->num_rx_rings, stack_tx_rings);
err = nfp_net_check_config(nn, *xdp_prog, rx, tx);
if (err)
return err;
if (!netif_running(nn->netdev)) { if (!netif_running(nn->netdev)) {
nn->rxd_cnt = rxd_cnt; nfp_net_ring_reconfig_down(nn, xdp_prog, rx, tx,
nn->txd_cnt = txd_cnt; stack_tx_rings, num_vecs);
return 0; return 0;
} }
old_rxd_cnt = nn->rxd_cnt;
old_txd_cnt = nn->txd_cnt;
/* Prepare new rings */ /* Prepare new rings */
if (nn->rxd_cnt != rxd_cnt) { for (r = nn->num_r_vecs; r < num_vecs; r++) {
rx_rings = nfp_net_shadow_rx_rings_prepare(nn, nn->fl_bufsz, err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
rxd_cnt); if (err) {
if (!rx_rings) num_vecs = r;
return -ENOMEM; goto err_cleanup_vecs;
}
} }
if (nn->txd_cnt != txd_cnt) { if (rx) {
tx_rings = nfp_net_shadow_tx_rings_prepare(nn, txd_cnt); if (!nfp_net_rx_ring_set_prepare(nn, rx, *xdp_prog)) {
if (!tx_rings) { err = -ENOMEM;
nfp_net_shadow_rx_rings_free(nn, rx_rings); goto err_cleanup_vecs;
return -ENOMEM; }
}
if (tx) {
if (!nfp_net_tx_ring_set_prepare(nn, tx, stack_tx_rings)) {
err = -ENOMEM;
goto err_free_rx;
} }
} }
@@ -2345,39 +2593,51 @@ int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
nfp_net_close_stack(nn); nfp_net_close_stack(nn);
nfp_net_clear_config_and_disable(nn); nfp_net_clear_config_and_disable(nn);
if (rx_rings) err = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings); xdp_prog, rx, tx);
if (tx_rings)
tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
nn->rxd_cnt = rxd_cnt;
nn->txd_cnt = txd_cnt;
err = nfp_net_set_config_and_enable(nn);
if (err) { if (err) {
const int err_new = err; int err2;
/* Try with old configuration and old rings */
if (rx_rings)
rx_rings = nfp_net_shadow_rx_rings_swap(nn, rx_rings);
if (tx_rings)
tx_rings = nfp_net_shadow_tx_rings_swap(nn, tx_rings);
nn->rxd_cnt = old_rxd_cnt; nfp_net_clear_config_and_disable(nn);
nn->txd_cnt = old_txd_cnt;
err = __nfp_net_set_config_and_enable(nn); /* Try with old configuration and old rings */
if (err) err2 = nfp_net_ring_swap_enable(nn, &num_vecs, &stack_tx_rings,
xdp_prog, rx, tx);
if (err2)
nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n", nn_err(nn, "Can't restore ring config - FW communication failed (%d,%d)\n",
err_new, err); err, err2);
} }
for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
nfp_net_shadow_rx_rings_free(nn, rx_rings); if (rx)
nfp_net_shadow_tx_rings_free(nn, tx_rings); nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
if (tx)
nfp_net_tx_ring_set_free(nn, tx);
nfp_net_open_stack(nn); nfp_net_open_stack(nn);
return err; return err;
err_free_rx:
if (rx)
nfp_net_rx_ring_set_free(nn, rx, *xdp_prog);
err_cleanup_vecs:
for (r = num_vecs - 1; r >= nn->num_r_vecs; r--)
nfp_net_cleanup_vector(nn, &nn->r_vecs[r]);
return err;
}
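nfp_net_ring_reconfig() above follows a prepare-then-commit flow: new vectors and ring sets are allocated while the device is still up, the device is then stopped and the new rings swapped in, and if the firmware rejects the new configuration the old rings are swapped back before whichever set lost is freed. A minimal, illustrative-only sketch of that control flow; every type and helper below is a placeholder, not driver API:

/* Illustrative-only sketch of the prepare/commit flow in
 * nfp_net_ring_reconfig(); all types and helpers are placeholders.
 */
#include <stdbool.h>
#include <stdio.h>

struct ring_set { int dcnt; };

static bool prepare_rings(struct ring_set *s) { (void)s; return true; }
static int  enable_fw(struct ring_set *s)     { (void)s; return 0; }
static void free_rings(struct ring_set *s)    { (void)s; }

static void swap_rings(struct ring_set **a, struct ring_set **b)
{
	struct ring_set *t = *a; *a = *b; *b = t;
}

static int reconfig(struct ring_set **live, struct ring_set *new_set)
{
	int err;

	if (!prepare_rings(new_set))        /* allocate before stopping the device */
		return -1;

	/* device stopped at this point in the driver */
	swap_rings(live, &new_set);         /* new rings become live */
	err = enable_fw(*live);
	if (err)
		swap_rings(live, &new_set); /* FW refused: fall back to the old rings */

	free_rings(new_set);                /* free whichever set is not live */
	/* device restarted at this point in the driver */
	return err;
}

int main(void)
{
	struct ring_set old = { 1024 }, next = { 2048 }, *live = &old;

	printf("reconfig: %d, live dcnt=%d\n", reconfig(&live, &next), live->dcnt);
	return 0;
}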
static int nfp_net_change_mtu(struct net_device *netdev, int new_mtu)
{
struct nfp_net *nn = netdev_priv(netdev);
struct nfp_net_ring_set rx = {
.n_rings = nn->num_rx_rings,
.mtu = new_mtu,
.dcnt = nn->rxd_cnt,
};
return nfp_net_ring_reconfig(nn, &nn->xdp_prog, &rx, NULL);
}
static struct rtnl_link_stats64 *nfp_net_stat64(struct net_device *netdev,
...@@ -2434,8 +2694,12 @@ nfp_net_setup_tc(struct net_device *netdev, u32 handle, __be16 proto,
if (proto != htons(ETH_P_ALL))
return -ENOTSUPP;
if (tc->type == TC_SETUP_CLSBPF && nfp_net_ebpf_capable(nn)) {
if (!nn->bpf_offload_xdp)
return nfp_net_bpf_offload(nn, tc->cls_bpf);
else
return -EBUSY;
}
return -EINVAL;
}
...@@ -2643,6 +2907,87 @@ static void nfp_net_del_vxlan_port(struct net_device *netdev,
nfp_net_set_vxlan_port(nn, idx, 0);
}
static int nfp_net_xdp_offload(struct nfp_net *nn, struct bpf_prog *prog)
{
struct tc_cls_bpf_offload cmd = {
.prog = prog,
};
int ret;
if (!nfp_net_ebpf_capable(nn))
return -EINVAL;
if (nn->ctrl & NFP_NET_CFG_CTRL_BPF) {
if (!nn->bpf_offload_xdp)
return prog ? -EBUSY : 0;
cmd.command = prog ? TC_CLSBPF_REPLACE : TC_CLSBPF_DESTROY;
} else {
if (!prog)
return 0;
cmd.command = TC_CLSBPF_ADD;
}
ret = nfp_net_bpf_offload(nn, &cmd);
/* Stop offload if replace not possible */
if (ret && cmd.command == TC_CLSBPF_REPLACE)
nfp_net_xdp_offload(nn, NULL);
nn->bpf_offload_xdp = prog && !ret;
return ret;
}
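The branches above amount to a small decision table over "hardware BPF active", "owned by XDP" and "program given". A hedged restatement as standalone C (the enum mirrors the TC_CLSBPF_* commands but is not the kernel's; "owned_by_xdp == false" while hardware BPF is active means TC cls_bpf holds the offload, so XDP must not steal it):

/* Illustrative restatement (not driver code) of the command selection
 * in nfp_net_xdp_offload().
 */
#include <stdbool.h>
#include <stdio.h>

enum cmd { CMD_NONE, CMD_ADD, CMD_REPLACE, CMD_DESTROY, CMD_BUSY };

static enum cmd pick_cmd(bool hw_bpf_active, bool owned_by_xdp, bool have_prog)
{
	if (hw_bpf_active) {
		if (!owned_by_xdp)
			return have_prog ? CMD_BUSY : CMD_NONE;
		return have_prog ? CMD_REPLACE : CMD_DESTROY;
	}
	return have_prog ? CMD_ADD : CMD_NONE;
}

int main(void)
{
	printf("%d %d %d\n",
	       pick_cmd(false, false, true),  /* CMD_ADD     */
	       pick_cmd(true, true, true),    /* CMD_REPLACE */
	       pick_cmd(true, false, true));  /* CMD_BUSY    */
	return 0;
}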
static int nfp_net_xdp_setup(struct nfp_net *nn, struct bpf_prog *prog)
{
struct nfp_net_ring_set rx = {
.n_rings = nn->num_rx_rings,
.mtu = nn->netdev->mtu,
.dcnt = nn->rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = nn->num_tx_rings,
.dcnt = nn->txd_cnt,
};
int err;
if (!prog && !nn->xdp_prog)
return 0;
if (prog && nn->xdp_prog) {
prog = xchg(&nn->xdp_prog, prog);
bpf_prog_put(prog);
nfp_net_xdp_offload(nn, nn->xdp_prog);
return 0;
}
tx.n_rings += prog ? nn->num_rx_rings : -nn->num_rx_rings;
/* We need RX reconfig to remap the buffers (BIDIR vs FROM_DEV) */
err = nfp_net_ring_reconfig(nn, &prog, &rx, &tx);
if (err)
return err;
/* @prog got swapped and is now the old one */
if (prog)
bpf_prog_put(prog);
nfp_net_xdp_offload(nn, nn->xdp_prog);
return 0;
}
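The tx.n_rings adjustment above requests one extra TX ring per RX ring when an XDP program is installed (for XDP_TX), and removes those rings again when the program is cleared. A tiny illustrative calculation, assuming a hypothetical device with 8 RX rings and 8 stack TX rings:

/* Illustrative only: TX ring accounting around XDP enable/disable. */
#include <stdio.h>

int main(void)
{
	unsigned int num_rx_rings = 8;
	unsigned int stack_tx = 8;                       /* rings used by the stack  */
	unsigned int with_xdp = stack_tx + num_rx_rings; /* + one XDP_TX ring per RX */

	printf("TX rings without XDP: %u\n", stack_tx);  /* 8  */
	printf("TX rings with XDP:    %u\n", with_xdp);  /* 16 */
	return 0;
}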
static int nfp_net_xdp(struct net_device *netdev, struct netdev_xdp *xdp)
{
struct nfp_net *nn = netdev_priv(netdev);
switch (xdp->command) {
case XDP_SETUP_PROG:
return nfp_net_xdp_setup(nn, xdp->prog);
case XDP_QUERY_PROG:
xdp->prog_attached = !!nn->xdp_prog;
return 0;
default:
return -EINVAL;
}
}
static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_open = nfp_net_netdev_open,
.ndo_stop = nfp_net_netdev_close,
...@@ -2657,6 +3002,7 @@ static const struct net_device_ops nfp_net_netdev_ops = {
.ndo_features_check = nfp_net_features_check,
.ndo_udp_tunnel_add = nfp_net_add_vxlan_port,
.ndo_udp_tunnel_del = nfp_net_del_vxlan_port,
.ndo_xdp = nfp_net_xdp,
};
/**
...@@ -2763,13 +3109,9 @@ void nfp_net_netdev_free(struct nfp_net *nn)
 */
static void nfp_net_rss_init(struct nfp_net *nn)
{
netdev_rss_key_fill(nn->rss_key, NFP_NET_CFG_RSS_KEY_SZ);
nfp_net_rss_init_itbl(nn);
/* Enable IPv4/IPv6 TCP by default */
nn->rss_cfg = NFP_NET_CFG_RSS_IPV4_TCP |
...@@ -2923,5 +3265,11 @@ int nfp_net_netdev_init(struct net_device *netdev)
 */
void nfp_net_netdev_clean(struct net_device *netdev)
{
struct nfp_net *nn = netdev_priv(netdev);
if (nn->xdp_prog)
bpf_prog_put(nn->xdp_prog);
if (nn->bpf_offload_xdp)
nfp_net_xdp_offload(nn, NULL);
unregister_netdev(nn->netdev);
}
...@@ -114,6 +114,16 @@ static const struct file_operations nfp_rx_q_fops = {
.llseek = seq_lseek
};
static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f);
static const struct file_operations nfp_tx_q_fops = {
.owner = THIS_MODULE,
.open = nfp_net_debugfs_tx_q_open,
.release = single_release,
.read = seq_read,
.llseek = seq_lseek
};
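Both the "tx" and the "xdp" debugfs queue files reuse the same open and read routines; the reader tells them apart by which file_operations the file was created with (the debugfs_real_fops() check further down). A minimal userspace illustration of that dispatch-by-ops-identity idea (placeholders only, not kernel code):

/* Illustrative only: one reader, two "fops"; pick the ring by ops identity. */
#include <stdio.h>

struct ops { const char *name; };

static const struct ops tx_ops  = { "tx"  };
static const struct ops xdp_ops = { "xdp" };

static void read_queue(const struct ops *real_ops)
{
	const char *ring = (real_ops == &tx_ops) ? "stack TX ring" : "XDP TX ring";

	printf("dumping %s\n", ring);
}

int main(void)
{
	read_queue(&tx_ops);
	read_queue(&xdp_ops);
	return 0;
}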
static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
{
struct nfp_net_r_vector *r_vec = file->private;
...@@ -126,10 +136,13 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
rtnl_lock();
if (debugfs_real_fops(file->file) == &nfp_tx_q_fops)
tx_ring = r_vec->tx_ring;
else
tx_ring = r_vec->xdp_ring;
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
if (!netif_running(nn->netdev))
goto out;
...@@ -148,9 +161,14 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
txd->vals[2], txd->vals[3]);
skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb) {
if (tx_ring == r_vec->tx_ring)
seq_printf(file, " skb->head=%p skb->data=%p",
skb->head, skb->data);
else
seq_printf(file, " frag=%p", skb);
}
if (tx_ring->txbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
&tx_ring->txbufs[i].dma_addr);
...@@ -176,7 +194,7 @@ static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f)
return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private);
}
static const struct file_operations nfp_xdp_q_fops = {
.owner = THIS_MODULE,
.open = nfp_net_debugfs_tx_q_open,
.release = single_release,
...@@ -186,7 +204,7 @@ static const struct file_operations nfp_tx_q_fops = {
void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
{
struct dentry *queues, *tx, *rx, *xdp;
char int_name[16];
int i;
...@@ -204,16 +222,19 @@ void nfp_net_debugfs_adapter_add(struct nfp_net *nn)
rx = debugfs_create_dir("rx", queues);
tx = debugfs_create_dir("tx", queues);
xdp = debugfs_create_dir("xdp", queues);
if (IS_ERR_OR_NULL(rx) || IS_ERR_OR_NULL(tx) || IS_ERR_OR_NULL(xdp))
return;
for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, rx,
&nn->r_vecs[i], &nfp_rx_q_fops);
debugfs_create_file(int_name, S_IRUSR, xdp,
&nn->r_vecs[i], &nfp_xdp_q_fops);
}
for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) {
sprintf(int_name, "%d", i);
debugfs_create_file(int_name, S_IRUSR, tx,
&nn->r_vecs[i], &nfp_tx_q_fops);
......
...@@ -158,6 +158,28 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->tx_pending = nn->txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
struct nfp_net_ring_set rx = {
.n_rings = nn->num_rx_rings,
.mtu = nn->netdev->mtu,
.dcnt = rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = nn->num_tx_rings,
.dcnt = txd_cnt,
};
if (nn->rxd_cnt != rxd_cnt)
reconfig_rx = &rx;
if (nn->txd_cnt != txd_cnt)
reconfig_tx = &tx;
return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
reconfig_rx, reconfig_tx);
}
static int nfp_net_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
...@@ -614,6 +636,76 @@ static int nfp_net_set_coalesce(struct net_device *netdev,
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD);
}
static void nfp_net_get_channels(struct net_device *netdev,
struct ethtool_channels *channel)
{
struct nfp_net *nn = netdev_priv(netdev);
unsigned int num_tx_rings;
num_tx_rings = nn->num_tx_rings;
if (nn->xdp_prog)
num_tx_rings -= nn->num_rx_rings;
channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
channel->max_combined = min(channel->max_rx, channel->max_tx);
channel->max_other = NFP_NET_NON_Q_VECTORS;
channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
channel->rx_count = nn->num_rx_rings - channel->combined_count;
channel->tx_count = num_tx_rings - channel->combined_count;
channel->other_count = NFP_NET_NON_Q_VECTORS;
}
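The accounting above reports min(RX, TX) rings as combined channels and the remainder as RX-only or TX-only (XDP TX rings are subtracted first so they never show up). A hedged worked example, not driver code, assuming 8 RX rings and either 8 or 4 stack TX rings:

/* Illustrative only: how ring counts map to ethtool channel counts. */
#include <stdio.h>

static void report(unsigned int rx, unsigned int tx)
{
	unsigned int combined = rx < tx ? rx : tx;

	printf("combined=%u rx=%u tx=%u\n",
	       combined, rx - combined, tx - combined);
}

int main(void)
{
	report(8, 8); /* combined=8 rx=0 tx=0 */
	report(8, 4); /* combined=4 rx=4 tx=0 */
	return 0;
}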
static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
unsigned int total_tx)
{
struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
struct nfp_net_ring_set rx = {
.n_rings = total_rx,
.mtu = nn->netdev->mtu,
.dcnt = nn->rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = total_tx,
.dcnt = nn->txd_cnt,
};
if (nn->num_rx_rings != total_rx)
reconfig_rx = &rx;
if (nn->num_stack_tx_rings != total_tx ||
(nn->xdp_prog && reconfig_rx))
reconfig_tx = &tx;
/* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
if (nn->xdp_prog)
tx.n_rings += total_rx;
return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
reconfig_rx, reconfig_tx);
}
static int nfp_net_set_channels(struct net_device *netdev,
struct ethtool_channels *channel)
{
struct nfp_net *nn = netdev_priv(netdev);
unsigned int total_rx, total_tx;
/* Reject unsupported */
if (!channel->combined_count ||
channel->other_count != NFP_NET_NON_Q_VECTORS ||
(channel->rx_count && channel->tx_count))
return -EINVAL;
total_rx = channel->combined_count + channel->rx_count;
total_tx = channel->combined_count + channel->tx_count;
if (total_rx > min(nn->max_rx_rings, nn->max_r_vecs) ||
total_tx > min(nn->max_tx_rings, nn->max_r_vecs))
return -EINVAL;
return nfp_net_set_num_rings(nn, total_rx, total_tx);
}
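Going the other way, set_channels() rebuilds the ring totals from the request and rejects combinations it cannot express: no combined channels, or extra RX and extra TX rings in the same request. A small illustrative check, assuming a request equivalent to "ethtool -L ethX combined 4 rx 2" and hypothetical device limits of 8/8 (the other_count check is omitted here):

/* Illustrative only: mapping an ethtool channel request to ring totals.
 * The request and the 8/8 limits are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	unsigned int combined = 4, rx = 2, tx = 0;  /* "ethtool -L ethX combined 4 rx 2" */
	unsigned int max_rx = 8, max_tx = 8;        /* hypothetical device limits */
	unsigned int total_rx, total_tx;

	if (!combined || (rx && tx)) {              /* reject unsupported mixes */
		printf("-EINVAL\n");
		return 0;
	}

	total_rx = combined + rx;                   /* 6 */
	total_tx = combined + tx;                   /* 4 */
	if (total_rx > max_rx || total_tx > max_tx)
		printf("-EINVAL\n");
	else
		printf("total_rx=%u total_tx=%u\n", total_rx, total_tx);
	return 0;
}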
static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_drvinfo = nfp_net_get_drvinfo,
.get_link = ethtool_op_get_link,
...@@ -632,6 +724,8 @@ static const struct ethtool_ops nfp_net_ethtool_ops = {
.get_regs = nfp_net_get_regs,
.get_coalesce = nfp_net_get_coalesce,
.set_coalesce = nfp_net_set_coalesce,
.get_channels = nfp_net_get_channels,
.set_channels = nfp_net_set_channels,
};
void nfp_net_set_ethtool_ops(struct net_device *netdev)
......
...@@ -111,6 +111,9 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
const struct tc_action *a;
LIST_HEAD(actions);
if (!cls_bpf->exts)
return NN_ACT_XDP;
/* TC direct action */
if (cls_bpf->exts_integrated) {
if (tc_no_actions(cls_bpf->exts))
...@@ -233,9 +236,7 @@ static int nfp_net_bpf_stop(struct nfp_net *nn)
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
{
struct nfp_bpf_result res;
dma_addr_t dma_addr;
......
...@@ -52,7 +52,8 @@ extern struct srcu_struct debugfs_srcu;
 * Must only be called under the protection established by
 * debugfs_use_file_start().
 */
static inline const struct file_operations *
debugfs_real_fops(const struct file *filp)
__must_hold(&debugfs_srcu)
{
/*
......