Commit 79c12a75 authored by Jakub Kicinski, committed by David S. Miller

nfp: separate data path information from the rest of the adapter structure

Move all data path information into a separate structure.  This way
we will be able to allocate a new data path with all new rings etc.
and swap it in easily.

No functional changes.
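
To make the intent concrete, the kind of reconfiguration this split
enables looks roughly like the sketch below.  The helper names are
hypothetical and not part of this commit; they only illustrate building
a complete candidate datapath on the side and committing it with a
single structure assignment:

	/* Hypothetical helpers sketching the clone-and-swap pattern */
	static struct nfp_net_dp *nfp_net_clone_dp(struct nfp_net *nn)
	{
		struct nfp_net_dp *new;

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return NULL;

		*new = nn->dp;	/* copy all current datapath state */

		/* rings for the candidate datapath are allocated anew
		 * by the caller
		 */
		new->tx_rings = NULL;
		new->rx_rings = NULL;

		return new;
	}

	static void nfp_net_commit_dp(struct nfp_net *nn,
				      struct nfp_net_dp *new)
	{
		/* once the new rings are allocated and the device is
		 * quiesced, the swap itself cannot fail
		 */
		nn->dp = *new;
		kfree(new);
	}

If allocating the candidate's rings fails, the candidate is simply
freed and the live nn->dp is never touched.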
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6dc6826f
@@ -50,14 +50,14 @@
#include "nfp_net_ctrl.h"
#define nn_err(nn, fmt, args...) netdev_err((nn)->netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->netdev, fmt, ## args)
#define nn_warn_ratelimit(nn, fmt, args...) \
#define nn_err(nn, fmt, args...) netdev_err((nn)->dp.netdev, fmt, ## args)
#define nn_warn(nn, fmt, args...) netdev_warn((nn)->dp.netdev, fmt, ## args)
#define nn_info(nn, fmt, args...) netdev_info((nn)->dp.netdev, fmt, ## args)
#define nn_dbg(nn, fmt, args...) netdev_dbg((nn)->dp.netdev, fmt, ## args)
#define nn_dp_warn(dp, fmt, args...) \
do { \
if (unlikely(net_ratelimit())) \
netdev_warn((nn)->netdev, fmt, ## args); \
netdev_warn((dp)->netdev, fmt, ## args); \
} while (0)
/* Max time to wait for NFP to respond on updates (in seconds) */
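
The nn_dp_warn() macro above exists because, with this split, fast-path
code will increasingly be handed only a struct nfp_net_dp pointer rather
than the whole adapter structure.  A hypothetical call site (illustrative
only, not from this commit):

	static void nfp_net_rx_drop_one(struct nfp_net_dp *dp,
					struct sk_buff *skb)
	{
		/* rate-limited, since drops can occur at line rate */
		nn_dp_warn(dp, "dropping frame, len %u\n", skb->len);
		dev_kfree_skb_any(skb);
	}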
@@ -434,18 +434,62 @@ struct nfp_stat_pair {
};
/**
* struct nfp_net - NFP network device structure
* struct nfp_net_dp - NFP network device datapath data structure
* @dev: Backpointer to struct device
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @netdev: Backpointer to net_device structure
* @is_vf: Is the driver attached to a VF?
* @bpf_offload_skip_sw: Offloaded BPF program will not be rerun by cls_bpf
* @bpf_offload_xdp: Offloaded BPF program is XDP
 * @chained_metadata_format: Firmware will use new metadata format
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @ctrl: Local copy of the control register/word.
* @fl_bufsz: Currently configured size of the freelist buffers
* @rx_offset: Offset in the RX buffers where packet data starts
* @xdp_prog: Installed XDP program
* @fw_ver: Firmware version
* @tx_rings: Array of pre-allocated TX ring structures
* @rx_rings: Array of pre-allocated RX ring structures
*
* @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors
* @num_r_vecs: Number of used ring vectors
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
*/
struct nfp_net_dp {
struct device *dev;
struct net_device *netdev;
unsigned is_vf:1;
unsigned bpf_offload_skip_sw:1;
unsigned bpf_offload_xdp:1;
unsigned chained_metadata_format:1;
u32 ctrl;
u32 fl_bufsz;
u32 rx_offset;
struct bpf_prog *xdp_prog;
struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings;
/* Cold data follows */
unsigned int txd_cnt;
unsigned int rxd_cnt;
unsigned int num_r_vecs;
unsigned int num_tx_rings;
unsigned int num_stack_tx_rings;
unsigned int num_rx_rings;
};
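
Because struct nfp_net embeds this structure by value (the dp member
below), the whole datapath configuration can be snapshotted or replaced
by plain structure assignment, e.g. (illustration only; try_reconfig()
is a hypothetical placeholder):

	struct nfp_net_dp saved = nn->dp;	/* snapshot live datapath */

	if (try_reconfig(nn) < 0)		/* hypothetical reconfig step */
		nn->dp = saved;			/* roll back by assignment */

The /* Cold data follows */ marker splits the structure so that fields
touched on every packet stay in the leading cache lines, while the ring
and vector counts after it are only read on reconfiguration.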
/**
* struct nfp_net - NFP network device structure
* @dp: Datapath structure
* @fw_ver: Firmware version
* @cap: Capabilities advertised by the Firmware
 * @max_mtu: Maximum supported MTU advertised by the Firmware
* @rss_hfunc: RSS selected hash function
@@ -457,17 +501,9 @@ struct nfp_stat_pair {
* @rx_filter_change: Jiffies when statistics last changed
* @rx_filter_stats_timer: Timer for polling filter offload statistics
* @rx_filter_lock: Lock protecting timer state changes (teardown)
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @max_tx_rings: Maximum number of TX rings supported by the Firmware
* @max_rx_rings: Maximum number of RX rings supported by the Firmware
* @num_tx_rings: Currently configured number of TX rings
* @num_stack_tx_rings: Number of TX rings used by the stack (not XDP)
* @num_rx_rings: Currently configured number of RX rings
* @txd_cnt: Size of the TX ring in number of descriptors
* @rxd_cnt: Size of the RX ring in number of descriptors
* @tx_rings: Array of pre-allocated TX ring structures
* @rx_rings: Array of pre-allocated RX ring structures
* @max_r_vecs: Number of allocated interrupt vectors for RX/TX
* @num_r_vecs: Number of used ring vectors
* @r_vecs: Pre-allocated array of ring vectors
* @irq_entries: Pre-allocated array of MSI-X entries
* @lsc_handler: Handler for Link State Change interrupt
@@ -502,25 +538,10 @@ struct nfp_stat_pair {
* @eth_port: Translated ETH Table port entry
*/
struct nfp_net {
struct device *dev;
struct net_device *netdev;
unsigned is_vf:1;
unsigned bpf_offload_skip_sw:1;
unsigned bpf_offload_xdp:1;
unsigned chained_metadata_format:1;
u32 ctrl;
u32 fl_bufsz;
u32 rx_offset;
struct bpf_prog *xdp_prog;
struct nfp_net_tx_ring *tx_rings;
struct nfp_net_rx_ring *rx_rings;
struct nfp_net_dp dp;
struct nfp_net_fw_version fw_ver;
u32 cap;
u32 max_mtu;
@@ -537,18 +558,10 @@ struct nfp_net {
unsigned int max_tx_rings;
unsigned int max_rx_rings;
unsigned int num_tx_rings;
unsigned int num_stack_tx_rings;
unsigned int num_rx_rings;
int stride_tx;
int stride_rx;
int txd_cnt;
int rxd_cnt;
unsigned int max_r_vecs;
unsigned int num_r_vecs;
struct nfp_net_r_vector r_vecs[NFP_NET_MAX_R_VECS];
struct msix_entry irq_entries[NFP_NET_MAX_IRQS];
@@ -54,7 +54,7 @@ static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data)
goto out;
nn = r_vec->nfp_net;
rx_ring = r_vec->rx_ring;
if (!netif_running(nn->netdev))
if (!netif_running(nn->dp.netdev))
goto out;
rxd_cnt = rx_ring->cnt;
@@ -145,7 +145,7 @@ static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data)
if (!r_vec->nfp_net || !tx_ring)
goto out;
nn = r_vec->nfp_net;
if (!netif_running(nn->netdev))
if (!netif_running(nn->dp.netdev))
goto out;
txd_cnt = tx_ring->cnt;
@@ -127,9 +127,9 @@ static const struct _nfp_net_et_stats nfp_net_et_stats[] = {
};
#define NN_ET_GLOBAL_STATS_LEN ARRAY_SIZE(nfp_net_et_stats)
#define NN_ET_RVEC_STATS_LEN (nn->num_r_vecs * 3)
#define NN_ET_RVEC_STATS_LEN (nn->dp.num_r_vecs * 3)
#define NN_ET_RVEC_GATHER_STATS 7
#define NN_ET_QUEUE_STATS_LEN ((nn->num_tx_rings + nn->num_rx_rings) * 2)
#define NN_ET_QUEUE_STATS_LEN ((nn->dp.num_tx_rings + nn->dp.num_rx_rings) * 2)
#define NN_ET_STATS_LEN (NN_ET_GLOBAL_STATS_LEN + NN_ET_RVEC_GATHER_STATS + \
NN_ET_RVEC_STATS_LEN + NN_ET_QUEUE_STATS_LEN)
@@ -180,29 +180,29 @@ static void nfp_net_get_ringparam(struct net_device *netdev,
ring->rx_max_pending = NFP_NET_MAX_RX_DESCS;
ring->tx_max_pending = NFP_NET_MAX_TX_DESCS;
ring->rx_pending = nn->rxd_cnt;
ring->tx_pending = nn->txd_cnt;
ring->rx_pending = nn->dp.rxd_cnt;
ring->tx_pending = nn->dp.txd_cnt;
}
static int nfp_net_set_ring_size(struct nfp_net *nn, u32 rxd_cnt, u32 txd_cnt)
{
struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
struct nfp_net_ring_set rx = {
.n_rings = nn->num_rx_rings,
.mtu = nn->netdev->mtu,
.n_rings = nn->dp.num_rx_rings,
.mtu = nn->dp.netdev->mtu,
.dcnt = rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = nn->num_tx_rings,
.n_rings = nn->dp.num_tx_rings,
.dcnt = txd_cnt,
};
if (nn->rxd_cnt != rxd_cnt)
if (nn->dp.rxd_cnt != rxd_cnt)
reconfig_rx = &rx;
if (nn->txd_cnt != txd_cnt)
if (nn->dp.txd_cnt != txd_cnt)
reconfig_tx = &tx;
return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
return nfp_net_ring_reconfig(nn, &nn->dp.xdp_prog,
reconfig_rx, reconfig_tx);
}
@@ -224,11 +224,11 @@ static int nfp_net_set_ringparam(struct net_device *netdev,
txd_cnt < NFP_NET_MIN_TX_DESCS || txd_cnt > NFP_NET_MAX_TX_DESCS)
return -EINVAL;
if (nn->rxd_cnt == rxd_cnt && nn->txd_cnt == txd_cnt)
if (nn->dp.rxd_cnt == rxd_cnt && nn->dp.txd_cnt == txd_cnt)
return 0;
nn_dbg(nn, "Change ring size: RxQ %u->%u, TxQ %u->%u\n",
nn->rxd_cnt, rxd_cnt, nn->txd_cnt, txd_cnt);
nn->dp.rxd_cnt, rxd_cnt, nn->dp.txd_cnt, txd_cnt);
return nfp_net_set_ring_size(nn, rxd_cnt, txd_cnt);
}
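
For reference, this is the path exercised by the standard ethtool
ring-size interface, e.g. ethtool -G <ifname> rx 2048 tx 2048 (example
values; they must fall within the NFP_NET_MIN/MAX_*_DESCS bounds checked
above).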
@@ -246,7 +246,7 @@ static void nfp_net_get_strings(struct net_device *netdev,
memcpy(p, nfp_net_et_stats[i].name, ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nn->num_r_vecs; i++) {
for (i = 0; i < nn->dp.num_r_vecs; i++) {
sprintf(p, "rvec_%u_rx_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rvec_%u_tx_pkts", i);
@@ -268,13 +268,13 @@ static void nfp_net_get_strings(struct net_device *netdev,
p += ETH_GSTRING_LEN;
strncpy(p, "tx_lso", ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
for (i = 0; i < nn->num_tx_rings; i++) {
for (i = 0; i < nn->dp.num_tx_rings; i++) {
sprintf(p, "txq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "txq_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
for (i = 0; i < nn->num_rx_rings; i++) {
for (i = 0; i < nn->dp.num_rx_rings; i++) {
sprintf(p, "rxq_%u_pkts", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rxq_%u_bytes", i);
@@ -312,7 +312,7 @@ static void nfp_net_get_stats(struct net_device *netdev,
break;
}
}
for (j = 0; j < nn->num_r_vecs; j++) {
for (j = 0; j < nn->dp.num_r_vecs; j++) {
unsigned int start;
do {
@@ -338,13 +338,13 @@ static void nfp_net_get_stats(struct net_device *netdev,
}
for (j = 0; j < NN_ET_RVEC_GATHER_STATS; j++)
data[i++] = gathered_stats[j];
for (j = 0; j < nn->num_tx_rings; j++) {
for (j = 0; j < nn->dp.num_tx_rings; j++) {
io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->ctrl_bar + NFP_NET_CFG_TXR_STATS(j) + 8;
data[i++] = readq(io_p);
}
for (j = 0; j < nn->num_rx_rings; j++) {
for (j = 0; j < nn->dp.num_rx_rings; j++) {
io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j);
data[i++] = readq(io_p);
io_p = nn->ctrl_bar + NFP_NET_CFG_RXR_STATS(j) + 8;
@@ -411,7 +411,7 @@ static int nfp_net_get_rxnfc(struct net_device *netdev,
switch (cmd->cmd) {
case ETHTOOL_GRXRINGS:
cmd->data = nn->num_rx_rings;
cmd->data = nn->dp.num_rx_rings;
return 0;
case ETHTOOL_GRXFH:
return nfp_net_get_rss_hash_opts(nn, cmd);
@@ -745,16 +745,16 @@ static void nfp_net_get_channels(struct net_device *netdev,
struct nfp_net *nn = netdev_priv(netdev);
unsigned int num_tx_rings;
num_tx_rings = nn->num_tx_rings;
if (nn->xdp_prog)
num_tx_rings -= nn->num_rx_rings;
num_tx_rings = nn->dp.num_tx_rings;
if (nn->dp.xdp_prog)
num_tx_rings -= nn->dp.num_rx_rings;
channel->max_rx = min(nn->max_rx_rings, nn->max_r_vecs);
channel->max_tx = min(nn->max_tx_rings, nn->max_r_vecs);
channel->max_combined = min(channel->max_rx, channel->max_tx);
channel->max_other = NFP_NET_NON_Q_VECTORS;
channel->combined_count = min(nn->num_rx_rings, num_tx_rings);
channel->rx_count = nn->num_rx_rings - channel->combined_count;
channel->combined_count = min(nn->dp.num_rx_rings, num_tx_rings);
channel->rx_count = nn->dp.num_rx_rings - channel->combined_count;
channel->tx_count = num_tx_rings - channel->combined_count;
channel->other_count = NFP_NET_NON_Q_VECTORS;
}
@@ -765,25 +765,25 @@ static int nfp_net_set_num_rings(struct nfp_net *nn, unsigned int total_rx,
struct nfp_net_ring_set *reconfig_rx = NULL, *reconfig_tx = NULL;
struct nfp_net_ring_set rx = {
.n_rings = total_rx,
.mtu = nn->netdev->mtu,
.dcnt = nn->rxd_cnt,
.mtu = nn->dp.netdev->mtu,
.dcnt = nn->dp.rxd_cnt,
};
struct nfp_net_ring_set tx = {
.n_rings = total_tx,
.dcnt = nn->txd_cnt,
.dcnt = nn->dp.txd_cnt,
};
if (nn->num_rx_rings != total_rx)
if (nn->dp.num_rx_rings != total_rx)
reconfig_rx = &rx;
if (nn->num_stack_tx_rings != total_tx ||
(nn->xdp_prog && reconfig_rx))
if (nn->dp.num_stack_tx_rings != total_tx ||
(nn->dp.xdp_prog && reconfig_rx))
reconfig_tx = &tx;
/* nfp_net_check_config() will catch tx.n_rings > nn->max_tx_rings */
if (nn->xdp_prog)
if (nn->dp.xdp_prog)
tx.n_rings += total_rx;
return nfp_net_ring_reconfig(nn, &nn->xdp_prog,
return nfp_net_ring_reconfig(nn, &nn->dp.xdp_prog,
reconfig_rx, reconfig_tx);
}
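
A worked example for the tx.n_rings adjustment above: with an XDP
program attached, every RX ring needs a dedicated XDP TX ring on top of
the rings used by the stack, so asking for 8 RX and 8 stack TX rings
makes the reconfig request tx.n_rings = 8 + 8 = 16, which
nfp_net_check_config() rejects if it exceeds nn->max_tx_rings.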
@@ -130,7 +130,7 @@ static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
}
static void
nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
nfp_net_get_mac_addr_hwinfo(struct nfp_net_dp *dp, struct nfp_cpp *cpp,
unsigned int id)
{
u8 mac_addr[ETH_ALEN];
@@ -141,22 +141,22 @@ nfp_net_get_mac_addr_hwinfo(struct nfp_net *nn, struct nfp_cpp *cpp,
mac_str = nfp_hwinfo_lookup(cpp, name);
if (!mac_str) {
dev_warn(nn->dev, "Can't lookup MAC address. Generate\n");
eth_hw_addr_random(nn->netdev);
dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
eth_hw_addr_random(dp->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
dev_warn(nn->dev,
dev_warn(dp->dev,
"Can't parse MAC address (%s). Generate.\n", mac_str);
eth_hw_addr_random(nn->netdev);
eth_hw_addr_random(dp->netdev);
return;
}
ether_addr_copy(nn->netdev->dev_addr, mac_addr);
ether_addr_copy(nn->netdev->perm_addr, mac_addr);
ether_addr_copy(dp->netdev->dev_addr, mac_addr);
ether_addr_copy(dp->netdev->perm_addr, mac_addr);
}
/**
@@ -179,12 +179,12 @@ nfp_net_get_mac_addr(struct nfp_net *nn, struct nfp_pf *pf, unsigned int id)
nn->eth_port = &pf->eth_tbl->ports[i];
ether_addr_copy(nn->netdev->dev_addr, mac_addr);
ether_addr_copy(nn->netdev->perm_addr, mac_addr);
ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
return;
}
nfp_net_get_mac_addr_hwinfo(nn, pf->cpp, id);
nfp_net_get_mac_addr_hwinfo(&nn->dp, pf->cpp, id);
}
static unsigned int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
@@ -309,7 +309,7 @@ nfp_net_pf_alloc_port_netdev(struct nfp_pf *pf, void __iomem *ctrl_bar,
nn->ctrl_bar = ctrl_bar;
nn->tx_bar = tx_bar;
nn->rx_bar = rx_bar;
nn->is_vf = 0;
nn->dp.is_vf = 0;
nn->stride_rx = stride;
nn->stride_tx = stride;
@@ -331,7 +331,7 @@ nfp_net_pf_init_port_netdev(struct nfp_pf *pf, struct nfp_net *nn,
*/
nn->me_freq_mhz = 1200;
err = nfp_net_netdev_init(nn->netdev);
err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
return err;
@@ -400,7 +400,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
/* Get MSI-X vectors */
wanted_irqs = 0;
list_for_each_entry(nn, &pf->ports, port_list)
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->num_r_vecs;
wanted_irqs += NFP_NET_NON_Q_VECTORS + nn->dp.num_r_vecs;
pf->irq_entries = kcalloc(wanted_irqs, sizeof(*pf->irq_entries),
GFP_KERNEL);
if (!pf->irq_entries) {
@@ -445,7 +445,7 @@ nfp_net_pf_spawn_netdevs(struct nfp_pf *pf,
err_prev_deinit:
list_for_each_entry_continue_reverse(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_netdev_clean(nn->netdev);
nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_irqs_disable(pf->pdev);
err_vec_free:
@@ -571,7 +571,7 @@ void nfp_net_pci_remove(struct nfp_pf *pf)
list_for_each_entry(nn, &pf->ports, port_list) {
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_netdev_clean(nn->netdev);
nfp_net_netdev_clean(nn->dp.netdev);
}
nfp_net_pf_free_netdevs(pf);
@@ -58,7 +58,7 @@ void nfp_net_filter_stats_timer(unsigned long data)
spin_lock_bh(&nn->rx_filter_lock);
if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
mod_timer(&nn->rx_filter_stats_timer,
jiffies + NFP_NET_STAT_POLL_IVL);
@@ -132,7 +132,7 @@ nfp_net_bpf_get_act(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return NN_ACT_TC_DROP;
if (is_tcf_mirred_egress_redirect(a) &&
tcf_mirred_ifindex(a) == nn->netdev->ifindex)
tcf_mirred_ifindex(a) == nn->dp.netdev->ifindex)
return NN_ACT_TC_REDIR;
}
@@ -160,7 +160,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
act = ret;
max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32;
if (max_mtu < nn->netdev->mtu) {
if (max_mtu < nn->dp.netdev->mtu) {
nn_info(nn, "BPF offload not supported with MTU larger than HW packet split boundary\n");
return -ENOTSUPP;
}
@@ -168,7 +168,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
done_off = nn_readw(nn, NFP_NET_CFG_BPF_DONE);
*code = dma_zalloc_coherent(nn->dev, code_sz, dma_addr, GFP_KERNEL);
*code = dma_zalloc_coherent(nn->dp.dev, code_sz, dma_addr, GFP_KERNEL);
if (!*code)
return -ENOMEM;
@@ -180,7 +180,7 @@ nfp_net_bpf_offload_prepare(struct nfp_net *nn,
return 0;
out:
dma_free_coherent(nn->dev, code_sz, *code, *dma_addr);
dma_free_coherent(nn->dp.dev, code_sz, *code, *dma_addr);
return ret;
}
@@ -193,7 +193,7 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
u64 bpf_addr = dma_addr;
int err;
nn->bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
nn->dp.bpf_offload_skip_sw = !!(tc_flags & TCA_CLS_FLAGS_SKIP_SW);
if (dense_mode)
bpf_addr |= NFP_NET_CFG_BPF_CFG_8CTX;
@@ -207,13 +207,13 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
nn_err(nn, "FW command error while loading BPF: %d\n", err);
/* Enable passing packets through BPF function */
nn->ctrl |= NFP_NET_CFG_CTRL_BPF;
nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
nn->dp.ctrl |= NFP_NET_CFG_CTRL_BPF;
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
err = nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
if (err)
nn_err(nn, "FW command error while enabling BPF: %d\n", err);
dma_free_coherent(nn->dev, code_sz, code, dma_addr);
dma_free_coherent(nn->dp.dev, code_sz, code, dma_addr);
nfp_net_bpf_stats_reset(nn);
mod_timer(&nn->rx_filter_stats_timer, jiffies + NFP_NET_STAT_POLL_IVL);
@@ -221,16 +221,16 @@ nfp_net_bpf_load_and_start(struct nfp_net *nn, u32 tc_flags,
static int nfp_net_bpf_stop(struct nfp_net *nn)
{
if (!(nn->ctrl & NFP_NET_CFG_CTRL_BPF))
if (!(nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF))
return 0;
spin_lock_bh(&nn->rx_filter_lock);
nn->ctrl &= ~NFP_NET_CFG_CTRL_BPF;
nn->dp.ctrl &= ~NFP_NET_CFG_CTRL_BPF;
spin_unlock_bh(&nn->rx_filter_lock);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->ctrl);
nn_writel(nn, NFP_NET_CFG_CTRL, nn->dp.ctrl);
del_timer_sync(&nn->rx_filter_stats_timer);
nn->bpf_offload_skip_sw = 0;
nn->dp.bpf_offload_skip_sw = 0;
return nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_GEN);
}
@@ -254,7 +254,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
* frames which didn't have BPF applied in the hardware should
* be fine if software fallback is available, though.
*/
if (nn->bpf_offload_skip_sw)
if (nn->dp.bpf_offload_skip_sw)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -269,7 +269,7 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct tc_cls_bpf_offload *cls_bpf)
return 0;
case TC_CLSBPF_ADD:
if (nn->ctrl & NFP_NET_CFG_CTRL_BPF)
if (nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
return -EBUSY;
err = nfp_net_bpf_offload_prepare(nn, cls_bpf, &res, &code,
@@ -84,12 +84,12 @@ static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);
if (!is_valid_ether_addr(mac_addr)) {
eth_hw_addr_random(nn->netdev);
eth_hw_addr_random(nn->dp.netdev);
return;
}
ether_addr_copy(nn->netdev->dev_addr, mac_addr);
ether_addr_copy(nn->netdev->perm_addr, mac_addr);
ether_addr_copy(nn->dp.netdev->dev_addr, mac_addr);
ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
@@ -211,7 +211,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
nn->fw_ver = fw_ver;
nn->ctrl_bar = ctrl_bar;
nn->is_vf = 1;
nn->dp.is_vf = 1;
nn->stride_tx = stride;
nn->stride_rx = stride;
@@ -268,7 +268,8 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
NFP_NET_MIN_PORT_IRQS,
NFP_NET_NON_Q_VECTORS + nn->num_r_vecs);
NFP_NET_NON_Q_VECTORS +
nn->dp.num_r_vecs);
if (!num_irqs) {
nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
err = -EIO;
@@ -282,7 +283,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
*/
nn->me_freq_mhz = 1200;
err = nfp_net_netdev_init(nn->netdev);
err = nfp_net_netdev_init(nn->dp.netdev);
if (err)
goto err_irqs_disable;
@@ -327,7 +328,7 @@ static void nfp_netvf_pci_remove(struct pci_dev *pdev)
nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
nfp_net_debugfs_dir_clean(&vf->ddir);
nfp_net_netdev_clean(nn->netdev);
nfp_net_netdev_clean(nn->dp.netdev);
nfp_net_irqs_disable(pdev);