Commit 62d03330 authored by Jakub Kicinski, committed by David S. Miller

nfp: move the fast path code to separate files

In preparation for supporting a new datapath format, move all ring and
fast-path logic into separate files. This is essentially a verbatim
move with some wrapping functions; no new structures or functions are
added.

The current data path is called NFD3, after the initial version of the
driver ABI it used. The non-fast-path but ring-related functions are
moved to the nfp_net_dp.c file.
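
The wrapping functions mentioned above are thin pass-throughs from the
generic code to the NFD3 implementations. A minimal sketch of what one
such wrapper in nfp_net_dp.c could look like (illustrative only; the
actual wrappers are in parts of the patch not expanded below):

void nfp_net_tx_ring_reset(struct nfp_net_dp *dp,
			   struct nfp_net_tx_ring *tx_ring)
{
	/* NFD3 is the only datapath format at this point; the next commit
	 * replaces direct calls like this with per-datapath callbacks.
	 */
	nfp_nfd3_tx_ring_reset(dp, tx_ring);
}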

Changes to Jakub's work:
* Rebase on the XSK-related code.
* Split the patch; move the callback changes to the next commit.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: Fei Qin <fei.qin@corigine.com>
Signed-off-by: Simon Horman <simon.horman@corigine.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent fc9769f6
@@ -20,12 +20,16 @@ nfp-objs := \
 	ccm_mbox.o \
 	devlink_param.o \
 	nfp_asm.o \
+	nfd3/dp.o \
+	nfd3/rings.o \
+	nfd3/xsk.o \
 	nfp_app.o \
 	nfp_app_nic.o \
 	nfp_devlink.o \
 	nfp_hwmon.o \
 	nfp_main.o \
 	nfp_net_common.o \
+	nfp_net_dp.o \
 	nfp_net_ctrl.o \
 	nfp_net_debugdump.o \
 	nfp_net_ethtool.o \
This diff is collapsed.
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#ifndef _NFP_DP_NFD3_H_
#define _NFP_DP_NFD3_H_
struct sk_buff;
struct net_device;
/* TX descriptor format */
#define NFD3_DESC_TX_EOP BIT(7)
#define NFD3_DESC_TX_OFFSET_MASK GENMASK(6, 0)
#define NFD3_DESC_TX_MSS_MASK GENMASK(13, 0)
/* Flags in the host TX descriptor */
#define NFD3_DESC_TX_CSUM BIT(7)
#define NFD3_DESC_TX_IP4_CSUM BIT(6)
#define NFD3_DESC_TX_TCP_CSUM BIT(5)
#define NFD3_DESC_TX_UDP_CSUM BIT(4)
#define NFD3_DESC_TX_VLAN BIT(3)
#define NFD3_DESC_TX_LSO BIT(2)
#define NFD3_DESC_TX_ENCAP BIT(1)
#define NFD3_DESC_TX_O_IP4_CSUM BIT(0)
struct nfp_nfd3_tx_desc {
union {
struct {
u8 dma_addr_hi; /* High bits of host buf address */
__le16 dma_len; /* Length to DMA for this desc */
u8 offset_eop; /* Offset in buf where pkt starts +
* highest bit is eop flag.
*/
__le32 dma_addr_lo; /* Low 32bit of host buf addr */
__le16 mss; /* MSS to be used for LSO */
u8 lso_hdrlen; /* LSO, TCP payload offset */
u8 flags; /* TX Flags, see @NFD3_DESC_TX_* */
union {
struct {
u8 l3_offset; /* L3 header offset */
u8 l4_offset; /* L4 header offset */
};
__le16 vlan; /* VLAN tag to add if indicated */
};
__le16 data_len; /* Length of frame + meta data */
} __packed;
__le32 vals[4];
__le64 vals8[2];
};
};
/**
* struct nfp_nfd3_tx_buf - software TX buffer descriptor
* @skb: normal ring, sk_buff associated with this buffer
* @frag: XDP ring, page frag associated with this buffer
* @xdp: XSK buffer pool handle (for AF_XDP)
* @dma_addr: DMA mapping address of the buffer
* @fidx: Fragment index (-1 for the head and [0..nr_frags-1] for frags)
* @pkt_cnt: Number of packets to be produced out of the skb associated
* with this buffer (valid only on the head's buffer).
* Will be 1 for all non-TSO packets.
* @is_xsk_tx: Flag if buffer is a RX buffer after a XDP_TX action and not a
* buffer from the TX queue (for AF_XDP).
* @real_len: Number of bytes which to be produced out of the skb (valid only
* on the head's buffer). Equal to skb->len for non-TSO packets.
*/
struct nfp_nfd3_tx_buf {
union {
struct sk_buff *skb;
void *frag;
struct xdp_buff *xdp;
};
dma_addr_t dma_addr;
union {
struct {
short int fidx;
u16 pkt_cnt;
};
struct {
bool is_xsk_tx;
};
};
u32 real_len;
};
void
nfp_nfd3_rx_csum(const struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
const struct nfp_net_rx_desc *rxd,
const struct nfp_meta_parsed *meta, struct sk_buff *skb);
bool
nfp_nfd3_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
void *data, void *pkt, unsigned int pkt_len, int meta_len);
void nfp_nfd3_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
int nfp_nfd3_poll(struct napi_struct *napi, int budget);
void nfp_nfd3_ctrl_poll(struct tasklet_struct *t);
void nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
void nfp_nfd3_xsk_tx_free(struct nfp_nfd3_tx_buf *txbuf);
int nfp_nfd3_xsk_poll(struct napi_struct *napi, int budget);
void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_rx_ring_fill_freelist(struct nfp_net_dp *dp,
struct nfp_net_rx_ring *rx_ring);
int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring);
int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring);
void
nfp_nfd3_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p);
netdev_tx_t nfp_nfd3_tx(struct sk_buff *skb, struct net_device *netdev);
bool nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
bool __nfp_nfd3_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
#endif
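
For readers new to this layout, here is a hedged illustration (not part
of the patch, and assuming the driver's usual headers and types) of how
a single NFD3 TX descriptor could be filled for a linear packet; the
real logic lives in nfp_nfd3_tx() in nfd3/dp.c, which is not expanded
here:

/* Hypothetical helper, for illustration only */
static void example_nfd3_fill_txd(struct nfp_nfd3_tx_desc *txd,
				  dma_addr_t dma_addr, u16 dma_len,
				  u8 pkt_offset, u16 frame_len)
{
	txd->dma_addr_hi = upper_32_bits(dma_addr) & 0xff;
	txd->dma_addr_lo = cpu_to_le32(lower_32_bits(dma_addr));
	txd->dma_len = cpu_to_le16(dma_len);
	/* Low 7 bits: packet offset in the buffer; bit 7: end of packet */
	txd->offset_eop = (pkt_offset & NFD3_DESC_TX_OFFSET_MASK) |
			  NFD3_DESC_TX_EOP;
	txd->data_len = cpu_to_le16(frame_len);	/* frame + metadata length */
	txd->mss = 0;		/* no LSO in this example */
	txd->lso_hdrlen = 0;
	txd->flags = 0;		/* no checksum/VLAN offload requested */
	txd->vlan = 0;
}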
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
#include <linux/seq_file.h>
#include "../nfp_net.h"
#include "../nfp_net_dp.h"
#include "../nfp_net_xsk.h"
#include "nfd3.h"
static void nfp_nfd3_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_nfd3_tx_buf *txbuf;
unsigned int idx;
while (tx_ring->rd_p != tx_ring->wr_p) {
idx = D_IDX(tx_ring, tx_ring->rd_p);
txbuf = &tx_ring->txbufs[idx];
txbuf->real_len = 0;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
if (tx_ring->r_vec->xsk_pool) {
if (txbuf->is_xsk_tx)
nfp_nfd3_xsk_tx_free(txbuf);
xsk_tx_completed(tx_ring->r_vec->xsk_pool, 1);
}
}
}
/**
* nfp_nfd3_tx_ring_reset() - Free any untransmitted buffers and reset pointers
* @dp: NFP Net data path struct
* @tx_ring: TX ring structure
*
* Assumes that the device is stopped, must be idempotent.
*/
void
nfp_nfd3_tx_ring_reset(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct netdev_queue *nd_q;
const skb_frag_t *frag;
while (!tx_ring->is_xdp && tx_ring->rd_p != tx_ring->wr_p) {
struct nfp_nfd3_tx_buf *tx_buf;
struct sk_buff *skb;
int idx, nr_frags;
idx = D_IDX(tx_ring, tx_ring->rd_p);
tx_buf = &tx_ring->txbufs[idx];
skb = tx_ring->txbufs[idx].skb;
nr_frags = skb_shinfo(skb)->nr_frags;
if (tx_buf->fidx == -1) {
/* unmap head */
dma_unmap_single(dp->dev, tx_buf->dma_addr,
skb_headlen(skb), DMA_TO_DEVICE);
} else {
/* unmap fragment */
frag = &skb_shinfo(skb)->frags[tx_buf->fidx];
dma_unmap_page(dp->dev, tx_buf->dma_addr,
skb_frag_size(frag), DMA_TO_DEVICE);
}
/* check for last gather fragment */
if (tx_buf->fidx == nr_frags - 1)
dev_kfree_skb_any(skb);
tx_buf->dma_addr = 0;
tx_buf->skb = NULL;
tx_buf->fidx = -2;
tx_ring->qcp_rd_p++;
tx_ring->rd_p++;
}
if (tx_ring->is_xdp)
nfp_nfd3_xsk_tx_bufs_free(tx_ring);
memset(tx_ring->txds, 0, tx_ring->size);
tx_ring->wr_p = 0;
tx_ring->rd_p = 0;
tx_ring->qcp_rd_p = 0;
tx_ring->wr_ptr_add = 0;
if (tx_ring->is_xdp || !dp->netdev)
return;
nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx);
netdev_tx_reset_queue(nd_q);
}
/**
* nfp_nfd3_tx_ring_free() - Free resources allocated to a TX ring
* @tx_ring: TX ring to free
*/
void nfp_nfd3_tx_ring_free(struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
struct nfp_net_dp *dp = &r_vec->nfp_net->dp;
kvfree(tx_ring->txbufs);
if (tx_ring->txds)
dma_free_coherent(dp->dev, tx_ring->size,
tx_ring->txds, tx_ring->dma);
tx_ring->cnt = 0;
tx_ring->txbufs = NULL;
tx_ring->txds = NULL;
tx_ring->dma = 0;
tx_ring->size = 0;
}
/**
* nfp_nfd3_tx_ring_alloc() - Allocate resource for a TX ring
* @dp: NFP Net data path struct
* @tx_ring: TX Ring structure to allocate
*
* Return: 0 on success, negative errno otherwise.
*/
int
nfp_nfd3_tx_ring_alloc(struct nfp_net_dp *dp, struct nfp_net_tx_ring *tx_ring)
{
struct nfp_net_r_vector *r_vec = tx_ring->r_vec;
tx_ring->cnt = dp->txd_cnt;
tx_ring->size = array_size(tx_ring->cnt, sizeof(*tx_ring->txds));
tx_ring->txds = dma_alloc_coherent(dp->dev, tx_ring->size,
&tx_ring->dma,
GFP_KERNEL | __GFP_NOWARN);
if (!tx_ring->txds) {
netdev_warn(dp->netdev, "failed to allocate TX descriptor ring memory, requested descriptor count: %d, consider lowering descriptor count\n",
tx_ring->cnt);
goto err_alloc;
}
tx_ring->txbufs = kvcalloc(tx_ring->cnt, sizeof(*tx_ring->txbufs),
GFP_KERNEL);
if (!tx_ring->txbufs)
goto err_alloc;
if (!tx_ring->is_xdp && dp->netdev)
netif_set_xps_queue(dp->netdev, &r_vec->affinity_mask,
tx_ring->idx);
return 0;
err_alloc:
nfp_nfd3_tx_ring_free(tx_ring);
return -ENOMEM;
}
void
nfp_nfd3_tx_ring_bufs_free(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
unsigned int i;
if (!tx_ring->is_xdp)
return;
for (i = 0; i < tx_ring->cnt; i++) {
if (!tx_ring->txbufs[i].frag)
return;
nfp_net_dma_unmap_rx(dp, tx_ring->txbufs[i].dma_addr);
__free_page(virt_to_page(tx_ring->txbufs[i].frag));
}
}
int
nfp_nfd3_tx_ring_bufs_alloc(struct nfp_net_dp *dp,
struct nfp_net_tx_ring *tx_ring)
{
struct nfp_nfd3_tx_buf *txbufs = tx_ring->txbufs;
unsigned int i;
if (!tx_ring->is_xdp)
return 0;
for (i = 0; i < tx_ring->cnt; i++) {
txbufs[i].frag = nfp_net_rx_alloc_one(dp, &txbufs[i].dma_addr);
if (!txbufs[i].frag) {
nfp_nfd3_tx_ring_bufs_free(dp, tx_ring);
return -ENOMEM;
}
}
return 0;
}
void
nfp_nfd3_print_tx_descs(struct seq_file *file,
struct nfp_net_r_vector *r_vec,
struct nfp_net_tx_ring *tx_ring,
u32 d_rd_p, u32 d_wr_p)
{
struct nfp_nfd3_tx_desc *txd;
u32 txd_cnt = tx_ring->cnt;
int i;
for (i = 0; i < txd_cnt; i++) {
struct xdp_buff *xdp;
struct sk_buff *skb;
txd = &tx_ring->txds[i];
seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
txd->vals[0], txd->vals[1],
txd->vals[2], txd->vals[3]);
if (!tx_ring->is_xdp) {
skb = READ_ONCE(tx_ring->txbufs[i].skb);
if (skb)
seq_printf(file, " skb->head=%p skb->data=%p",
skb->head, skb->data);
} else {
xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
if (xdp)
seq_printf(file, " xdp->data=%p", xdp->data);
}
if (tx_ring->txbufs[i].dma_addr)
seq_printf(file, " dma_addr=%pad",
&tx_ring->txbufs[i].dma_addr);
if (i == tx_ring->rd_p % txd_cnt)
seq_puts(file, " H_RD");
if (i == tx_ring->wr_p % txd_cnt)
seq_puts(file, " H_WR");
if (i == d_rd_p % txd_cnt)
seq_puts(file, " D_RD");
if (i == d_wr_p % txd_cnt)
seq_puts(file, " D_WR");
seq_putc(file, '\n');
}
}
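
As a usage note, the allocation helpers above are meant to be paired; a
sketch of the assumed caller pattern (the real callers live in parts of
this patch not shown here):

/* Illustrative only: set up one TX ring, unwinding on failure */
static int example_setup_tx_ring(struct nfp_net_dp *dp,
				 struct nfp_net_tx_ring *tx_ring)
{
	int err;

	err = nfp_nfd3_tx_ring_alloc(dp, tx_ring);	/* descriptors + txbufs */
	if (err)
		return err;

	/* XDP frag buffers; a no-op for non-XDP rings */
	err = nfp_nfd3_tx_ring_bufs_alloc(dp, tx_ring);
	if (err) {
		nfp_nfd3_tx_ring_free(tx_ring);
		return err;
	}

	return 0;
}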
This diff is collapsed.
@@ -104,6 +104,9 @@ struct nfp_net_r_vector;
 struct nfp_port;
 struct xsk_buff_pool;
 
+struct nfp_nfd3_tx_desc;
+struct nfp_nfd3_tx_buf;
+
 /* Convenience macro for wrapping descriptor index on ring size */
 #define D_IDX(ring, idx)	((idx) & ((ring)->cnt - 1))
 
@@ -117,83 +120,6 @@ struct xsk_buff_pool;
 	__d->dma_addr_hi = upper_32_bits(__addr) & 0xff;	\
 } while (0)
 
-/* TX descriptor format */
-#define PCIE_DESC_TX_EOP		BIT(7)
-#define PCIE_DESC_TX_OFFSET_MASK	GENMASK(6, 0)
-#define PCIE_DESC_TX_MSS_MASK		GENMASK(13, 0)
-
-/* Flags in the host TX descriptor */
-#define PCIE_DESC_TX_CSUM		BIT(7)
-#define PCIE_DESC_TX_IP4_CSUM		BIT(6)
-#define PCIE_DESC_TX_TCP_CSUM		BIT(5)
-#define PCIE_DESC_TX_UDP_CSUM		BIT(4)
-#define PCIE_DESC_TX_VLAN		BIT(3)
-#define PCIE_DESC_TX_LSO		BIT(2)
-#define PCIE_DESC_TX_ENCAP		BIT(1)
-#define PCIE_DESC_TX_O_IP4_CSUM		BIT(0)
-
-struct nfp_net_tx_desc {
-	union {
-		struct {
-			u8 dma_addr_hi; /* High bits of host buf address */
-			__le16 dma_len;	/* Length to DMA for this desc */
-			u8 offset_eop;	/* Offset in buf where pkt starts +
-					 * highest bit is eop flag.
-					 */
-			__le32 dma_addr_lo; /* Low 32bit of host buf addr */
-			__le16 mss;	/* MSS to be used for LSO */
-			u8 lso_hdrlen;	/* LSO, TCP payload offset */
-			u8 flags;	/* TX Flags, see @PCIE_DESC_TX_* */
-			union {
-				struct {
-					u8 l3_offset; /* L3 header offset */
-					u8 l4_offset; /* L4 header offset */
-				};
-				__le16 vlan; /* VLAN tag to add if indicated */
-			};
-			__le16 data_len; /* Length of frame + meta data */
-		} __packed;
-		__le32 vals[4];
-		__le64 vals8[2];
-	};
-};
-
-/**
- * struct nfp_net_tx_buf - software TX buffer descriptor
- * @skb:	normal ring, sk_buff associated with this buffer
- * @frag:	XDP ring, page frag associated with this buffer
- * @xdp:	XSK buffer pool handle (for AF_XDP)
- * @dma_addr:	DMA mapping address of the buffer
- * @fidx:	Fragment index (-1 for the head and [0..nr_frags-1] for frags)
- * @pkt_cnt:	Number of packets to be produced out of the skb associated
- *		with this buffer (valid only on the head's buffer).
- *		Will be 1 for all non-TSO packets.
- * @is_xsk_tx:	Flag if buffer is a RX buffer after a XDP_TX action and not a
- *		buffer from the TX queue (for AF_XDP).
- * @real_len:	Number of bytes which to be produced out of the skb (valid only
- *		on the head's buffer). Equal to skb->len for non-TSO packets.
- */
-struct nfp_net_tx_buf {
-	union {
-		struct sk_buff *skb;
-		void *frag;
-		struct xdp_buff *xdp;
-	};
-	dma_addr_t dma_addr;
-	union {
-		struct {
-			short int fidx;
-			u16 pkt_cnt;
-		};
-		struct {
-			bool is_xsk_tx;
-		};
-	};
-	u32 real_len;
-};
-
 /**
  * struct nfp_net_tx_ring - TX ring structure
  * @r_vec:      Back pointer to ring vector structure
@@ -226,8 +152,8 @@ struct nfp_net_tx_ring {
 	u32 wr_ptr_add;
 
-	struct nfp_net_tx_buf *txbufs;
-	struct nfp_net_tx_desc *txds;
+	struct nfp_nfd3_tx_buf *txbufs;
+	struct nfp_nfd3_tx_desc *txds;
 
 	dma_addr_t dma;
 	size_t size;
@@ -960,7 +886,6 @@ int nfp_net_mbox_reconfig_and_unlock(struct nfp_net *nn, u32 mbox_cmd);
 void nfp_net_mbox_reconfig_post(struct nfp_net *nn, u32 update);
 int nfp_net_mbox_reconfig_wait_posted(struct nfp_net *nn);
 
-void nfp_net_irq_unmask(struct nfp_net *nn, unsigned int entry_nr);
 unsigned int
 nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries,
 		   unsigned int min_irqs, unsigned int want_irqs);
@@ -968,19 +893,10 @@ void nfp_net_irqs_disable(struct pci_dev *pdev);
 void
 nfp_net_irqs_assign(struct nfp_net *nn, struct msix_entry *irq_entries,
 		    unsigned int n);
-void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring);
-void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget);
-bool
-nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
-		   void *data, void *pkt, unsigned int pkt_len, int meta_len);
-void nfp_net_rx_csum(const struct nfp_net_dp *dp,
-		     struct nfp_net_r_vector *r_vec,
-		     const struct nfp_net_rx_desc *rxd,
-		     const struct nfp_meta_parsed *meta,
-		     struct sk_buff *skb);
+struct sk_buff *
+nfp_net_tls_tx(struct nfp_net_dp *dp, struct nfp_net_r_vector *r_vec,
+	       struct sk_buff *skb, u64 *tls_handle, int *nr_frags);
+void nfp_net_tls_tx_undo(struct sk_buff *skb, u64 tls_handle);
 
 struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn);
 int nfp_net_ring_reconfig(struct nfp_net *nn, struct nfp_net_dp *new,
 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
-/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
+/* Copyright (C) 2015-2019 Netronome Systems, Inc. */
 
 #include <linux/debugfs.h>
 #include <linux/module.h>
 #include <linux/rtnetlink.h>
 
 #include "nfp_net.h"
+#include "nfp_net_dp.h"
 
 static struct dentry *nfp_dir;
 
@@ -80,10 +81,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
 {
 	struct nfp_net_r_vector *r_vec = file->private;
 	struct nfp_net_tx_ring *tx_ring;
-	struct nfp_net_tx_desc *txd;
-	int d_rd_p, d_wr_p, txd_cnt;
 	struct nfp_net *nn;
-	int i;
+	int d_rd_p, d_wr_p;
 
 	rtnl_lock();
 
@@ -97,8 +96,6 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
 	if (!nfp_net_running(nn))
 		goto out;
 
-	txd_cnt = tx_ring->cnt;
-
 	d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q);
 	d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q);
 
@@ -108,41 +105,8 @@ static int nfp_tx_q_show(struct seq_file *file, void *data)
 		   tx_ring->cnt, &tx_ring->dma, tx_ring->txds,
 		   tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p);
 
-	for (i = 0; i < txd_cnt; i++) {
-		struct xdp_buff *xdp;
-		struct sk_buff *skb;
-
-		txd = &tx_ring->txds[i];
-		seq_printf(file, "%04d: 0x%08x 0x%08x 0x%08x 0x%08x", i,
-			   txd->vals[0], txd->vals[1],
-			   txd->vals[2], txd->vals[3]);
-
-		if (!tx_ring->is_xdp) {
-			skb = READ_ONCE(tx_ring->txbufs[i].skb);
-			if (skb)
-				seq_printf(file, " skb->head=%p skb->data=%p",
-					   skb->head, skb->data);
-		} else {
-			xdp = READ_ONCE(tx_ring->txbufs[i].xdp);
-			if (xdp)
-				seq_printf(file, " xdp->data=%p", xdp->data);
-		}
-
-		if (tx_ring->txbufs[i].dma_addr)
-			seq_printf(file, " dma_addr=%pad",
-				   &tx_ring->txbufs[i].dma_addr);
-
-		if (i == tx_ring->rd_p % txd_cnt)
-			seq_puts(file, " H_RD");
-		if (i == tx_ring->wr_p % txd_cnt)
-			seq_puts(file, " H_WR");
-		if (i == d_rd_p % txd_cnt)
-			seq_puts(file, " D_RD");
-		if (i == d_wr_p % txd_cnt)
-			seq_puts(file, " D_WR");
-		seq_putc(file, '\n');
-	}
+	nfp_net_debugfs_print_tx_descs(file, r_vec, tx_ring,
+				       d_rd_p, d_wr_p);
 
 out:
 	rtnl_unlock();
 	return 0;
This diff is collapsed.
This diff is collapsed.
@@ -15,15 +15,27 @@ static inline bool nfp_net_has_xsk_pool_slow(struct nfp_net_dp *dp,
 	return dp->xdp_prog && dp->xsk_pools[qid];
 }
 
+static inline int nfp_net_rx_space(struct nfp_net_rx_ring *rx_ring)
+{
+	return rx_ring->cnt - rx_ring->wr_p + rx_ring->rd_p - 1;
+}
+
+static inline int nfp_net_tx_space(struct nfp_net_tx_ring *tx_ring)
+{
+	return tx_ring->cnt - tx_ring->wr_p + tx_ring->rd_p - 1;
+}
+
+void nfp_net_xsk_rx_unstash(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_free(struct nfp_net_xsk_rx_buf *rxbuf);
+void nfp_net_xsk_rx_drop(struct nfp_net_r_vector *r_vec,
+			 struct nfp_net_xsk_rx_buf *xrxbuf);
 int nfp_net_xsk_setup_pool(struct net_device *netdev, struct xsk_buff_pool *pool,
 			   u16 queue_id);
 
-void nfp_net_xsk_tx_bufs_free(struct nfp_net_tx_ring *tx_ring);
 void nfp_net_xsk_rx_bufs_free(struct nfp_net_rx_ring *rx_ring);
 
 void nfp_net_xsk_rx_ring_fill_freelist(struct nfp_net_rx_ring *rx_ring);
 
 int nfp_net_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
 
-int nfp_net_xsk_poll(struct napi_struct *napi, int budget);
-
 #endif /* _NFP_XSK_H_ */