Commit 74811a71 authored by David S. Miller's avatar David S. Miller

Merge branch 'nfp-add-flower-app-with-representors'

Simon Horman says:

====================
nfp: add flower app with representors

this series adds a flower app to the NFP driver.
It initialises four types of netdevs:

* PF netdev - lower-device for communication of packets to device
* PF representor netdev
* VF representor netdevs
* Phys port representor netdevs

The PF netdev acts as a lower-device which sends and receives packets to
and from the firmware. The representors act as upper-devices. For TX
representors attach a metadata dst to the skb which is used by the PF
netdev to prepend metadata to the packet before forwarding it to the firmware. On
RX the PF netdev looks up the representor based on the prepended metadata
received from the firmware and forwards the skb to the representor after
removing the metadata.

Control queues are used to send and receive control messages which are
used to communicate configuration information with the firmware. These
are in a separate vNIC from the queues belonging to the PF netdev. The control
queues are not exposed to user-space via a netdev or any other means.

The first 9 patches of this series provide app-independent infrastructure
to instantiate representors and the remaining 3 patches provide an app
which uses this infrastructure.

As the name implies this app is targeted at providing offload of TC flower.
Flower offload - allowing classifiers to be attached to representor netdevs
- is intended to be provided by follow-up patches at which point it will
become the dominant feature of the app.

Minor changes since v2 noted in changelogs of individual patches.
Review of v1 and v2 of this patchset have been addressed either
through discussion on-list or changes in this patchset.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents cf3db45d 24a021ed
......@@ -22,10 +22,13 @@ nfp-objs := \
nfp_net_common.o \
nfp_net_ethtool.o \
nfp_net_main.o \
nfp_net_repr.o \
nfp_netvf_main.o \
nfp_port.o \
bpf/main.o \
bpf/offload.o \
flower/cmsg.o \
flower/main.o \
nic/main.o
ifeq ($(CONFIG_BPF_SYSCALL),y)
......
/*
* Copyright (C) 2015-2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/bitfield.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst_metadata.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfp_net_repr.h"
#include "./cmsg.h"
#define nfp_flower_cmsg_warn(app, fmt, args...) \
do { \
if (net_ratelimit()) \
nfp_warn((app)->cpp, fmt, ## args); \
} while (0)
/* The control message header sits at the very start of the skb data. */
static struct nfp_flower_cmsg_hdr *
nfp_flower_cmsg_get_hdr(struct sk_buff *skb)
{
	void *hdr = skb->data;

	return hdr;
}
/* Payload starts immediately after the fixed-size cmsg header. */
static void *nfp_flower_cmsg_get_data(struct sk_buff *skb)
{
	return skb->data + NFP_FLOWER_CMSG_HLEN;
}
/* nfp_flower_cmsg_alloc() - Allocate an skb for a flower control message.
 * @app:  NFP app handle (provides the ctrl vNIC allocator)
 * @size: payload size in bytes, excluding the common cmsg header
 * @type: control message type to place in the header
 *
 * Return: skb with the cmsg header filled in and length accounted for
 *	   (header + payload), or NULL on allocation failure.
 */
static struct sk_buff *
nfp_flower_cmsg_alloc(struct nfp_app *app, unsigned int size,
		      enum nfp_flower_cmsg_type_port type)
{
	struct nfp_flower_cmsg_hdr *ch;
	struct sk_buff *skb;

	/* Grow the request to also cover the common header. */
	size += NFP_FLOWER_CMSG_HLEN;

	/* GFP_KERNEL - callers must be able to sleep. */
	skb = nfp_app_ctrl_msg_alloc(app, size, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Header is written in place at the head of the buffer; skb_put()
	 * then accounts for header plus (still uninitialized) payload.
	 */
	ch = nfp_flower_cmsg_get_hdr(skb);
	ch->pad = 0;
	ch->version = NFP_FLOWER_CMSG_VER1;
	ch->type = type;
	skb_put(skb, size);

	return skb;
}
/* nfp_flower_cmsg_portmod() - Report a representor's state to firmware.
 * @netdev:     representor netdev whose state is being reported
 * @carrier_ok: true if the port's link should be reported as up
 *
 * Sends a PORT_MOD control message carrying the representor's firmware
 * port ID, link state and MTU.
 *
 * Return: 0 on success, -ENOMEM if the message could not be allocated.
 */
int nfp_flower_cmsg_portmod(struct net_device *netdev, bool carrier_ok)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_cmsg_portmod *msg;
	struct sk_buff *skb;

	skb = nfp_flower_cmsg_alloc(repr->app, sizeof(*msg),
				    NFP_FLOWER_CMSG_TYPE_PORT_MOD);
	if (!skb)
		return -ENOMEM;

	msg = nfp_flower_cmsg_get_data(skb);
	/* Firmware port ID is carried in the repr's metadata dst. */
	msg->portnum = cpu_to_be32(repr->dst->u.port_info.port_id);
	msg->reserved = 0;
	/* carrier_ok lands in bit 0, i.e. NFP_FLOWER_CMSG_PORTMOD_INFO_LINK. */
	msg->info = carrier_ok;
	msg->mtu = cpu_to_be16(netdev->mtu);

	nfp_ctrl_tx(repr->app->ctrl, skb);

	return 0;
}
/* nfp_flower_cmsg_portmod_rx() - Handle a PORT_MOD message from firmware.
 * @app: NFP app handle
 * @skb: control message (freed by the caller, nfp_flower_cmsg_rx())
 *
 * Propagates the firmware-reported link state (and, on link up, MTU) to
 * the corresponding representor netdev.  Messages for unknown ports are
 * dropped with a rate-limited warning.
 */
static void
nfp_flower_cmsg_portmod_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_portmod *msg;
	struct net_device *netdev;
	bool link;

	msg = nfp_flower_cmsg_get_data(skb);
	link = msg->info & NFP_FLOWER_CMSG_PORTMOD_INFO_LINK;

	/* Take the RTNL lock before entering the RCU read-side critical
	 * section: dev_set_mtu() requires RTNL, and acquiring a mutex
	 * (which may sleep) inside rcu_read_lock() is illegal on
	 * non-preemptible kernels.  The original code took rtnl_lock()
	 * under rcu_read_lock().
	 */
	rtnl_lock();
	rcu_read_lock();
	netdev = nfp_app_repr_get(app, be32_to_cpu(msg->portnum));
	if (!netdev) {
		nfp_flower_cmsg_warn(app, "ctrl msg for unknown port 0x%08x\n",
				     be32_to_cpu(msg->portnum));
		rcu_read_unlock();
		rtnl_unlock();
		return;
	}

	if (link) {
		netif_carrier_on(netdev);
		dev_set_mtu(netdev, be16_to_cpu(msg->mtu));
	} else {
		netif_carrier_off(netdev);
	}
	rcu_read_unlock();
	rtnl_unlock();
}
/* nfp_flower_cmsg_rx() - Entry point for control messages from firmware.
 * @app: NFP app handle
 * @skb: received control message; always consumed by this function
 *
 * Validates the header version and dispatches on the message type.
 * Unknown versions and types are dropped with a rate-limited warning.
 */
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb)
{
	struct nfp_flower_cmsg_hdr *cmsg_hdr;
	enum nfp_flower_cmsg_type_port type;

	cmsg_hdr = nfp_flower_cmsg_get_hdr(skb);

	if (unlikely(cmsg_hdr->version != NFP_FLOWER_CMSG_VER1)) {
		nfp_flower_cmsg_warn(app, "Cannot handle repr control version %u\n",
				     cmsg_hdr->version);
		goto out;
	}

	type = cmsg_hdr->type;
	switch (type) {
	case NFP_FLOWER_CMSG_TYPE_PORT_MOD:
		nfp_flower_cmsg_portmod_rx(app, skb);
		break;
	default:
		nfp_flower_cmsg_warn(app, "Cannot handle invalid repr control type %u\n",
				     type);
	}

out:
	/* skb is freed whether the message was handled or not. */
	dev_kfree_skb_any(skb);
}
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NFP_FLOWER_CMSG_H
#define NFP_FLOWER_CMSG_H
#include <linux/bitfield.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include "../nfp_app.h"
/* The base header for a control message packet.
 * Defines an 8-bit version, and an 8-bit type, padded
 * to a 32-bit word. Rest of the packet is type-specific.
 */
struct nfp_flower_cmsg_hdr {
	__be16 pad;	/* written as zero on TX (see nfp_flower_cmsg_alloc) */
	u8 type;	/* one of enum nfp_flower_cmsg_type_port */
	u8 version;	/* NFP_FLOWER_CMSG_VER1 */
};

/* Size of the fixed control message header */
#define NFP_FLOWER_CMSG_HLEN		sizeof(struct nfp_flower_cmsg_hdr)
/* Only protocol version currently understood by the driver */
#define NFP_FLOWER_CMSG_VER1		1
/* Types defined for port related control messages */
enum nfp_flower_cmsg_type_port {
	NFP_FLOWER_CMSG_TYPE_PORT_MOD = 8,	/* link state / MTU change */
	NFP_FLOWER_CMSG_TYPE_PORT_ECHO = 16,	/* NOTE(review): no RX handler yet */
	NFP_FLOWER_CMSG_TYPE_MAX = 32,
};

/* NFP_FLOWER_CMSG_TYPE_PORT_MOD - port state message, sent both ways */
struct nfp_flower_cmsg_portmod {
	__be32 portnum;		/* port ID built by the helpers below */
	u8 reserved;
	u8 info;		/* flags; see NFP_FLOWER_CMSG_PORTMOD_INFO_* */
	__be16 mtu;
};

/* Link-up flag within nfp_flower_cmsg_portmod::info */
#define NFP_FLOWER_CMSG_PORTMOD_INFO_LINK	BIT(0)
/* High-level classification carried in bits 31:28 of a flower port ID */
enum nfp_flower_cmsg_port_type {
	NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC = 0x0,
	NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT = 0x1,
	NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT = 0x2,
};

/* Sub-type of a PCIe port ID: which kind of vNIC it refers to */
enum nfp_flower_cmsg_port_vnic_type {
	NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF = 0x0,
	NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF = 0x1,
	NFP_FLOWER_CMSG_PORT_VNIC_TYPE_CTRL = 0x2,
};

/* Bit layout of the 32-bit port ID exchanged with the firmware.
 * The TYPE field selects which of the remaining fields are valid.
 */
#define NFP_FLOWER_CMSG_PORT_TYPE		GENMASK(31, 28)
#define NFP_FLOWER_CMSG_PORT_SYS_ID		GENMASK(27, 24)
#define NFP_FLOWER_CMSG_PORT_NFP_ID		GENMASK(23, 22)
/* Fields valid for PCIE_PORT type IDs */
#define NFP_FLOWER_CMSG_PORT_PCI		GENMASK(15, 14)
#define NFP_FLOWER_CMSG_PORT_VNIC_TYPE		GENMASK(13, 12)
#define NFP_FLOWER_CMSG_PORT_VNIC		GENMASK(11, 6)
#define NFP_FLOWER_CMSG_PORT_PCIE_Q		GENMASK(5, 0)
/* Field valid for PHYS_PORT type IDs */
#define NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM	GENMASK(7, 0)
/* Build the 32-bit firmware port ID for a physical port. */
static inline u32 nfp_flower_cmsg_phys_port(u8 phys_port)
{
	u32 id;

	id = FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
			NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT);
	id |= FIELD_PREP(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, phys_port);

	return id;
}
/* Build the 32-bit firmware port ID for a PCIe vNIC (PF/VF/ctrl). */
static inline u32
nfp_flower_cmsg_pcie_port(u8 nfp_pcie, enum nfp_flower_cmsg_port_vnic_type type,
			  u8 vnic, u8 q)
{
	u32 id;

	id = FIELD_PREP(NFP_FLOWER_CMSG_PORT_TYPE,
			NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT);
	id |= FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCI, nfp_pcie);
	id |= FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, type);
	id |= FIELD_PREP(NFP_FLOWER_CMSG_PORT_VNIC, vnic);
	id |= FIELD_PREP(NFP_FLOWER_CMSG_PORT_PCIE_Q, q);

	return id;
}
int nfp_flower_cmsg_portmod(struct net_device *netdev, bool carrier_ok);
void nfp_flower_cmsg_rx(struct nfp_app *app, struct sk_buff *skb);
#endif
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"
/**
 * struct nfp_flower_priv - Flower APP priv data
 * @nn: Pointer to the single data vNIC
 *
 * Allocated once in nfp_flower_vnic_init() and stored in nfp_app::priv
 * (app-level, not per-vNIC: flower supports exactly one data vNIC).
 */
struct nfp_flower_priv {
	struct nfp_net *nn;
};
/* Extra capability string reported for flower vNICs; both parameters
 * are required by the callback signature but unused here.
 */
static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	static const char cap[] = "FLOWER";

	return cap;
}
static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
/* nfp_flower_repr_get_type_and_port() - Decode a flower port ID.
 * @app:     NFP app handle (not referenced; kept for callback symmetry)
 * @port_id: 32-bit port ID from firmware metadata
 * @port:    output, index of the representor within its type's array
 *
 * Return: the representor type encoded in @port_id.
 *
 * NOTE(review): the fall-through returns NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC,
 * a value from a *different* enum than the declared return type; if its
 * numeric value collides with a valid nfp_repr_type, unknown port IDs
 * would alias that type in nfp_flower_repr_get() — confirm against
 * enum nfp_repr_type (not visible here).
 */
static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		/* PF and VF reprs share the PCIE port type; the vNIC
		 * sub-type field distinguishes them.
		 */
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return NFP_FLOWER_CMSG_PORT_TYPE_UNSPEC;
}
/* nfp_flower_repr_get() - Look up a representor netdev by firmware port ID.
 * @app:     NFP app handle
 * @port_id: 32-bit port ID from packet or control message metadata
 *
 * Uses rcu_dereference(), so the caller must hold rcu_read_lock() (the
 * RX paths that call this via nfp_app_repr_get() do).
 *
 * Return: representor netdev, or NULL if no representors of the decoded
 *	   type exist or the index is out of range.
 */
static struct net_device *
nfp_flower_repr_get(struct nfp_app *app, u32 port_id)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return reprs->reprs[port];
}
/* ndo_get_stats64 for representors: stats are maintained by the repr
 * layer, keyed by (type, port) decoded from the repr's firmware port ID.
 */
static void
nfp_flower_repr_netdev_get_stats64(struct net_device *netdev,
				   struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	enum nfp_repr_type type;
	u32 port_id;
	u8 port = 0;

	port_id = repr->dst->u.port_info.port_id;
	type = nfp_flower_repr_get_type_and_port(repr->app, port_id, &port);
	nfp_repr_get_stats64(repr->app, type, port, stats);
}
/* ndo_open for representors: notify firmware the port is up before
 * enabling carrier and the TX queues.
 */
static int nfp_flower_repr_netdev_open(struct net_device *netdev)
{
	int err;

	err = nfp_flower_cmsg_portmod(netdev, true);
	if (!err) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
	}

	return err;
}
/* ndo_stop for representors: quiesce TX locally, then tell the firmware
 * the port is down.
 */
static int nfp_flower_repr_netdev_stop(struct net_device *netdev)
{
	int err;

	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	err = nfp_flower_cmsg_portmod(netdev, false);

	return err;
}
/* Netdev ops shared by all representor types (PF, VF, phys port).
 * TX goes through nfp_repr_xmit (repr layer forwards via the lower PF
 * netdev); stats come from the repr layer's per-(type, port) counters.
 */
static const struct net_device_ops nfp_flower_repr_netdev_ops = {
	.ndo_open		= nfp_flower_repr_netdev_open,
	.ndo_stop		= nfp_flower_repr_netdev_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_get_stats64	= nfp_flower_repr_netdev_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
};
/* SR-IOV teardown callback: destroy all VF representors. */
static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}
/* nfp_flower_spawn_vnic_reprs() - Create representors for PCIe vNICs.
 * @app:       NFP app handle
 * @vnic_type: firmware vNIC sub-type encoded into the port IDs
 * @repr_type: driver representor type the set is published under
 * @cnt:       number of representors to create
 *
 * Allocates and initialises @cnt representor netdevs, each with a random
 * MAC address and a port ID built from the PCIe unit, @vnic_type and its
 * index, then publishes the whole set via nfp_app_reprs_set().
 *
 * Return: 0 on success, negative errno on failure (all partially
 * constructed representors are freed).
 */
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_reprs *reprs, *old_reprs;
	const u8 queue = 0;	/* all vNIC reprs currently use queue 0 */
	int i, err;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		u32 port_id;

		reprs->reprs[i] = nfp_repr_alloc(app);
		if (!reprs->reprs[i]) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		/* vNIC representors have no hardware MAC of their own. */
		eth_hw_addr_random(reprs->reprs[i]);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		/* The PF data vNIC netdev acts as the lower device. */
		err = nfp_repr_init(app, reprs->reprs[i],
				    &nfp_flower_repr_netdev_ops,
				    port_id, NULL, priv->nn->dp.netdev);
		if (err)
			goto err_reprs_clean;

		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 reprs->reprs[i]->name);
	}

	/* Publish; fails with -EBUSY if reprs of this type already exist. */
	old_reprs = nfp_app_reprs_set(app, repr_type, reprs);
	if (IS_ERR(old_reprs)) {
		err = PTR_ERR(old_reprs);
		goto err_reprs_clean;
	}

	return 0;
err_reprs_clean:
	nfp_reprs_clean_and_free(reprs);
	return err;
}
static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
return nfp_flower_spawn_vnic_reprs(app,
NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
NFP_REPR_TYPE_VF, num_vfs);
}
/* App stop callback: destroy the representors created by
 * nfp_flower_start().  VF representors are torn down separately via the
 * sriov_disable callback.
 */
static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
}
/* nfp_flower_spawn_phy_reprs() - Create representors for physical ports.
 * @app:  NFP app handle
 * @priv: flower app priv data (provides the lower-device vNIC)
 *
 * The representor array is sized by the highest port index in the NSP
 * eth table, so entries for gaps in the table remain NULL and lookups
 * of those ports return no netdev.
 *
 * Return: 0 on success, negative errno on failure (all partially
 * constructed representors are freed).
 */
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	struct nfp_reprs *reprs, *old_reprs;
	unsigned int i;
	int err;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < eth_tbl->count; i++) {
		int phys_port = eth_tbl->ports[i].index;
		struct nfp_port *port;
		u32 cmsg_port_id;

		reprs->reprs[phys_port] = nfp_repr_alloc(app);
		if (!reprs->reprs[phys_port]) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		/* Each phys port repr is backed by an nfp_port. */
		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT,
				      reprs->reprs[phys_port]);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			/* Port not handed to the repr yet; free it here. */
			nfp_port_free(port);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(reprs->reprs[phys_port], &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, port,
				     eth_tbl->ports[i].eth_index);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, reprs->reprs[phys_port],
				    &nfp_flower_repr_netdev_ops,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			nfp_port_free(port);
			goto err_reprs_clean;
		}

		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, reprs->reprs[phys_port]->name);
	}

	/* Publish; fails with -EBUSY if phys reprs already exist. */
	old_reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);
	if (IS_ERR(old_reprs)) {
		err = PTR_ERR(old_reprs);
		goto err_reprs_clean;
	}

	return 0;
err_reprs_clean:
	nfp_reprs_clean_and_free(reprs);
	return err;
}
/* App start callback: create physical port representors, then the PF
 * representor.
 *
 * NOTE(review): if spawning the PF repr fails, the phys port reprs
 * created just above are not torn down here — presumably the caller
 * invokes the app's .stop (nfp_flower_stop) on start failure; confirm
 * in the nfp_app start error path.
 */
static int nfp_flower_start(struct nfp_app *app)
{
	int err;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		return err;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					   NFP_REPR_TYPE_PF, 1);
}
/* vNIC clean callback: release the app priv allocated in vnic_init. */
static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	app->priv = NULL;
	kfree(priv);
}
/* vNIC init callback for the flower app.
 * @app: NFP app handle
 * @nn:  vNIC being brought up
 * @id:  vNIC index
 *
 * Flower supports exactly one data vNIC (id 0), which becomes the lower
 * device for all representors.  Any additional vNIC is marked as an
 * invalid port rather than failing the probe.
 *
 * Return: 0 on success, negative errno on failure.
 */
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn,
				unsigned int id)
{
	struct nfp_flower_priv *priv;

	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	app->priv = priv;
	priv->nn = nn;

	/* Lower device's own MAC is not meaningful on the wire. */
	eth_hw_addr_random(nn->dp.netdev);
	/* Keep the metadata dst that representors attach on TX. */
	netif_keep_dst(nn->dp.netdev);

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}
/* App init callback: verify the firmware resources flower depends on
 * are all present before committing to this app type.
 *
 * Return: 0 if usable, -EINVAL if a required table or BAR is missing.
 */
static int nfp_flower_init(struct nfp_app *app)
{
	const struct nfp_pf *pf = app->pf;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	return 0;
}
/* Flower application definition; referenced from the apps[] table. */
const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",
	/* Control messages carry prepended metadata (see
	 * nfp_app_ctrl_msg_alloc, which reserves extra bytes for it).
	 */
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,

	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get = eswitch_mode_get,
	.repr_get	= nfp_flower_repr_get,
};
......@@ -38,10 +38,12 @@
#include "nfpcore/nfp_nffw.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_repr.h"
/* All application types supported by the driver.
 * NOTE(review): the selection logic matching firmware app IDs against
 * this table is not visible in this chunk.
 */
static const struct nfp_app_type *apps[] = {
	&app_nic,
	&app_bpf,
	&app_flower,
};
const char *nfp_app_mip_name(struct nfp_app *app)
......@@ -51,14 +53,15 @@ const char *nfp_app_mip_name(struct nfp_app *app)
return nfp_mip_name(app->pf->mip);
}
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
struct sk_buff *
nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority)
{
struct sk_buff *skb;
if (nfp_app_ctrl_has_meta(app))
size += 8;
skb = alloc_skb(size, GFP_ATOMIC);
skb = alloc_skb(size, priority);
if (!skb)
return NULL;
......@@ -68,6 +71,25 @@ struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size)
return skb;
}
/* nfp_app_reprs_set() - Install a representor set for a given type.
 * @app:   NFP app handle
 * @type:  representor type slot to update
 * @reprs: new set, or NULL to clear the slot
 *
 * Caller must hold pf->lock (checked via lockdep).
 *
 * Return: the previous set (possibly NULL), or ERR_PTR(-EBUSY) when
 *	   trying to install a set over an existing one.
 */
struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
		  struct nfp_reprs *reprs)
{
	struct nfp_reprs *old;

	old = rcu_dereference_protected(app->reprs[type],
					lockdep_is_held(&app->pf->lock));
	if (reprs && old)
		return ERR_PTR(-EBUSY);

	rcu_assign_pointer(app->reprs[type], reprs);

	return old;
}
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id)
{
struct nfp_app *app;
......
......@@ -34,6 +34,10 @@
#ifndef _NFP_APP_H
#define _NFP_APP_H 1
#include <net/devlink.h>
#include "nfp_net_repr.h"
struct bpf_prog;
struct net_device;
struct pci_dev;
......@@ -48,10 +52,12 @@ struct nfp_net;
enum nfp_app_id {
NFP_APP_CORE_NIC = 0x1,
NFP_APP_BPF_NIC = 0x2,
NFP_APP_FLOWER_NIC = 0x3,
};
extern const struct nfp_app_type app_nic;
extern const struct nfp_app_type app_bpf;
extern const struct nfp_app_type app_flower;
/**
* struct nfp_app_type - application definition
......@@ -70,6 +76,10 @@ extern const struct nfp_app_type app_bpf;
* @setup_tc: setup TC ndo
* @tc_busy: TC HW offload busy (rules loaded)
* @xdp_offload: offload an XDP program
* @eswitch_mode_get: get SR-IOV eswitch mode
* @sriov_enable: app-specific sriov initialisation
* @sriov_disable: app-specific sriov clean-up
* @repr_get: get representor netdev
*/
struct nfp_app_type {
enum nfp_app_id id;
......@@ -95,6 +105,12 @@ struct nfp_app_type {
bool (*tc_busy)(struct nfp_app *app, struct nfp_net *nn);
int (*xdp_offload)(struct nfp_app *app, struct nfp_net *nn,
struct bpf_prog *prog);
int (*sriov_enable)(struct nfp_app *app, int num_vfs);
void (*sriov_disable)(struct nfp_app *app);
enum devlink_eswitch_mode (*eswitch_mode_get)(struct nfp_app *app);
struct net_device *(*repr_get)(struct nfp_app *app, u32 id);
};
/**
......@@ -103,7 +119,9 @@ struct nfp_app_type {
* @pf: backpointer to NFP PF structure
* @cpp: pointer to the CPP handle
* @ctrl: pointer to ctrl vNIC struct
* @reprs: array of pointers to representors
* @type: pointer to const application ops and info
* @priv: app-specific priv data
*/
struct nfp_app {
struct pci_dev *pdev;
......@@ -111,8 +129,10 @@ struct nfp_app {
struct nfp_cpp *cpp;
struct nfp_net *ctrl;
struct nfp_reprs __rcu *reprs[NFP_REPR_TYPE_MAX + 1];
const struct nfp_app_type *type;
void *priv;
};
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
......@@ -216,8 +236,44 @@ static inline void nfp_app_ctrl_rx(struct nfp_app *app, struct sk_buff *skb)
app->type->ctrl_msg_rx(app, skb);
}
/* Query the app's eswitch mode; -EOPNOTSUPP if the app doesn't report one. */
static inline int nfp_app_eswitch_mode_get(struct nfp_app *app, u16 *mode)
{
	const struct nfp_app_type *type = app->type;

	if (!type->eswitch_mode_get)
		return -EOPNOTSUPP;

	*mode = type->eswitch_mode_get(app);

	return 0;
}
/* Invoke the app's SR-IOV enable hook, if any; NULL app is tolerated. */
static inline int nfp_app_sriov_enable(struct nfp_app *app, int num_vfs)
{
	if (app && app->type->sriov_enable)
		return app->type->sriov_enable(app, num_vfs);

	return -EOPNOTSUPP;
}
/* Invoke the app's SR-IOV disable hook, if any; NULL app is tolerated. */
static inline void nfp_app_sriov_disable(struct nfp_app *app)
{
	if (!app || !app->type->sriov_disable)
		return;

	app->type->sriov_disable(app);
}
/* Look up a representor netdev via the app's .repr_get callback.
 * NOTE(review): the flower implementation uses rcu_dereference(), so
 * callers should hold rcu_read_lock() — confirm for other app types.
 */
static inline struct net_device *nfp_app_repr_get(struct nfp_app *app, u32 id)
{
	if (unlikely(!app || !app->type->repr_get))
		return NULL;

	return app->type->repr_get(app, id);
}
struct nfp_reprs *
nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type,
struct nfp_reprs *reprs);
const char *nfp_app_mip_name(struct nfp_app *app);
struct sk_buff *nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size);
struct sk_buff *
nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority);
struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id);
void nfp_app_free(struct nfp_app *app);
......
......@@ -42,6 +42,8 @@ static int
nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_net *nn, unsigned int id)
{
int err;
if (!pf->eth_tbl)
return 0;
......@@ -49,26 +51,13 @@ nfp_app_nic_vnic_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
if (IS_ERR(nn->port))
return PTR_ERR(nn->port);
nn->port->eth_id = id;
nn->port->eth_port = nfp_net_find_port(pf->eth_tbl, id);
/* Check if vNIC has external port associated and cfg is OK */
if (!nn->port->eth_port) {
nfp_err(app->cpp,
"NSP port entries don't match vNICs (no entry for port #%d)\n",
id);
err = nfp_port_init_phy_port(pf, app, nn->port, id);
if (err) {
nfp_port_free(nn->port);
return -EINVAL;
}
if (nn->port->eth_port->override_changed) {
nfp_warn(app->cpp,
"Config changed for port #%d, reboot required before port will be operational\n",
id);
nn->port->type = NFP_PORT_INVALID;
return 1;
return err;
}
return 0;
return nn->port->type == NFP_PORT_INVALID;
}
int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
......@@ -80,7 +69,7 @@ int nfp_app_nic_vnic_init(struct nfp_app *app, struct nfp_net *nn,
if (err)
return err < 0 ? err : 0;
nfp_net_get_mac_addr(app->pf, nn, id);
nfp_net_get_mac_addr(app->pf, nn->port, id);
return 0;
}
......@@ -149,9 +149,27 @@ nfp_devlink_port_unsplit(struct devlink *devlink, unsigned int port_index)
return ret;
}
/* devlink eswitch_mode_get op: forward to the app under pf->lock. */
static int nfp_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct nfp_pf *pf = devlink_priv(devlink);
	int ret = -EBUSY;

	/* pf->app may be NULL while the PF is (re)probing. */
	mutex_lock(&pf->lock);
	if (pf->app)
		ret = nfp_app_eswitch_mode_get(pf->app, mode);
	mutex_unlock(&pf->lock);

	return ret;
}
const struct devlink_ops nfp_devlink_ops = {
.port_split = nfp_devlink_port_split,
.port_unsplit = nfp_devlink_port_unsplit,
.eswitch_mode_get = nfp_devlink_eswitch_mode_get,
};
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port)
......
......@@ -54,6 +54,7 @@
#include "nfpcore/nfp6000_pcie.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
......@@ -97,28 +98,45 @@ static int nfp_pcie_sriov_enable(struct pci_dev *pdev, int num_vfs)
struct nfp_pf *pf = pci_get_drvdata(pdev);
int err;
mutex_lock(&pf->lock);
if (num_vfs > pf->limit_vfs) {
nfp_info(pf->cpp, "Firmware limits number of VFs to %u\n",
pf->limit_vfs);
return -EINVAL;
err = -EINVAL;
goto err_unlock;
}
err = nfp_app_sriov_enable(pf->app, num_vfs);
if (err) {
dev_warn(&pdev->dev, "App specific PCI sriov configuration failed: %d\n",
err);
goto err_unlock;
}
err = pci_enable_sriov(pdev, num_vfs);
if (err) {
dev_warn(&pdev->dev, "Failed to enable PCI sriov: %d\n", err);
return err;
goto err_app_sriov_disable;
}
pf->num_vfs = num_vfs;
dev_dbg(&pdev->dev, "Created %d VFs.\n", pf->num_vfs);
mutex_unlock(&pf->lock);
return num_vfs;
err_app_sriov_disable:
nfp_app_sriov_disable(pf->app);
err_unlock:
mutex_unlock(&pf->lock);
return err;
#endif
return 0;
}
static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
static int __nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
#ifdef CONFIG_PCI_IOV
struct nfp_pf *pf = pci_get_drvdata(pdev);
......@@ -132,6 +150,8 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
return -EPERM;
}
nfp_app_sriov_disable(pf->app);
pf->num_vfs = 0;
pci_disable_sriov(pdev);
......@@ -140,6 +160,18 @@ static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
return 0;
}
/* Locked wrapper around __nfp_pcie_sriov_disable(); takes pf->lock to
 * serialise against SR-IOV enable (which takes the same lock).
 */
static int nfp_pcie_sriov_disable(struct pci_dev *pdev)
{
	struct nfp_pf *pf = pci_get_drvdata(pdev);
	int err;

	mutex_lock(&pf->lock);
	err = __nfp_pcie_sriov_disable(pdev);
	mutex_unlock(&pf->lock);

	return err;
}
static int nfp_pcie_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
if (num_vfs == 0)
......@@ -431,11 +463,11 @@ static void nfp_pci_remove(struct pci_dev *pdev)
devlink = priv_to_devlink(pf);
nfp_net_pci_remove(pf);
nfp_pcie_sriov_disable(pdev);
pci_sriov_set_totalvfs(pf->pdev, 0);
nfp_net_pci_remove(pf);
devlink_unregister(devlink);
kfree(pf->rtbl);
......
......@@ -58,6 +58,7 @@ struct nfp_hwinfo;
struct nfp_mip;
struct nfp_net;
struct nfp_nsp_identify;
struct nfp_port;
struct nfp_rtsym_table;
/**
......@@ -68,6 +69,10 @@ struct nfp_rtsym_table;
* @data_vnic_bar: Pointer to the CPP area for the data vNICs' BARs
* @ctrl_vnic_bar: Pointer to the CPP area for the ctrl vNIC's BAR
* @qc_area: Pointer to the CPP area for the queues
* @mac_stats_bar: Pointer to the CPP area for the MAC stats
* @mac_stats_mem: Pointer to mapped MAC stats area
* @vf_cfg_bar: Pointer to the CPP area for the VF configuration BAR
* @vf_cfg_mem: Pointer to mapped VF configuration area
* @irq_entries: Array of MSI-X entries for all vNICs
* @limit_vfs: Number of VFs supported by firmware (~0 for PCI limit)
* @num_vfs: Number of SR-IOV VFs enabled
......@@ -97,6 +102,10 @@ struct nfp_pf {
struct nfp_cpp_area *data_vnic_bar;
struct nfp_cpp_area *ctrl_vnic_bar;
struct nfp_cpp_area *qc_area;
struct nfp_cpp_area *mac_stats_bar;
u8 __iomem *mac_stats_mem;
struct nfp_cpp_area *vf_cfg_bar;
u8 __iomem *vf_cfg_mem;
struct msix_entry *irq_entries;
......@@ -139,7 +148,7 @@ void nfp_hwmon_unregister(struct nfp_pf *pf);
struct nfp_eth_table_port *
nfp_net_find_port(struct nfp_eth_table *eth_tbl, unsigned int id);
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id);
nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port, unsigned int id);
bool nfp_ctrl_tx(struct nfp_net *nn, struct sk_buff *skb);
......
......@@ -318,6 +318,7 @@ struct nfp_meta_parsed {
u8 csum_type;
u32 hash;
u32 mark;
u32 portid;
__wsum csum;
};
......
......@@ -755,6 +755,26 @@ static void nfp_net_tx_xmit_more_flush(struct nfp_net_tx_ring *tx_ring)
tx_ring->wr_ptr_add = 0;
}
/* nfp_net_prep_port_id() - Prepend destination port ID metadata for TX.
 * @skb: packet to transmit
 *
 * If the skb carries a HW port mux metadata dst (attached by a
 * representor), push an 8-byte prepend: the NFP_NET_META_PORTID field
 * type word followed by the 32-bit destination port ID.
 *
 * Return: number of metadata bytes pushed (0 or 8), or -ENOMEM if
 *	   headroom could not be made writable.
 */
static int nfp_net_prep_port_id(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);
	unsigned char *data;

	/* Most traffic comes straight from the stack with no metadata dst. */
	if (likely(!md_dst))
		return 0;
	if (unlikely(md_dst->type != METADATA_HW_PORT_MUX))
		return 0;

	/* Ensure 8 writable bytes of headroom for the prepend. */
	if (unlikely(skb_cow_head(skb, 8)))
		return -ENOMEM;

	data = skb_push(skb, 8);
	put_unaligned_be32(NFP_NET_META_PORTID, data);
	put_unaligned_be32(md_dst->u.port_info.port_id, data + 4);

	return 8;
}
/**
* nfp_net_tx() - Main transmit entry point
* @skb: SKB to transmit
......@@ -767,6 +787,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net *nn = netdev_priv(netdev);
const struct skb_frag_struct *frag;
struct nfp_net_tx_desc *txd, txdg;
int f, nr_frags, wr_idx, md_bytes;
struct nfp_net_tx_ring *tx_ring;
struct nfp_net_r_vector *r_vec;
struct nfp_net_tx_buf *txbuf;
......@@ -774,8 +795,6 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
struct nfp_net_dp *dp;
dma_addr_t dma_addr;
unsigned int fsize;
int f, nr_frags;
int wr_idx;
u16 qidx;
dp = &nn->dp;
......@@ -797,6 +816,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
return NETDEV_TX_BUSY;
}
md_bytes = nfp_net_prep_port_id(skb);
if (unlikely(md_bytes < 0)) {
nfp_net_tx_xmit_more_flush(tx_ring);
dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
/* Start with the head skbuf */
dma_addr = dma_map_single(dp->dev, skb->data, skb_headlen(skb),
DMA_TO_DEVICE);
......@@ -815,7 +841,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
/* Build TX descriptor */
txd = &tx_ring->txds[wr_idx];
txd->offset_eop = (nr_frags == 0) ? PCIE_DESC_TX_EOP : 0;
txd->offset_eop = (nr_frags ? 0 : PCIE_DESC_TX_EOP) | md_bytes;
txd->dma_len = cpu_to_le16(skb_headlen(skb));
nfp_desc_set_dma_addr(txd, dma_addr);
txd->data_len = cpu_to_le16(skb->len);
......@@ -855,7 +881,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev)
*txd = txdg;
txd->dma_len = cpu_to_le16(fsize);
nfp_desc_set_dma_addr(txd, dma_addr);
txd->offset_eop =
txd->offset_eop |=
(f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0;
}
......@@ -1450,6 +1476,10 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta,
meta->mark = get_unaligned_be32(data);
data += 4;
break;
case NFP_NET_META_PORTID:
meta->portid = get_unaligned_be32(data);
data += 4;
break;
case NFP_NET_META_CSUM:
meta->csum_type = CHECKSUM_COMPLETE;
meta->csum =
......@@ -1594,6 +1624,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
struct nfp_net_rx_buf *rxbuf;
struct nfp_net_rx_desc *rxd;
struct nfp_meta_parsed meta;
struct net_device *netdev;
dma_addr_t new_dma_addr;
void *new_frag;
......@@ -1672,7 +1703,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
}
if (xdp_prog && !(rxd->rxd.flags & PCIE_DESC_RX_BPF &&
dp->bpf_offload_xdp)) {
dp->bpf_offload_xdp) && !meta.portid) {
unsigned int dma_off;
void *hard_start;
int act;
......@@ -1718,6 +1749,20 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
continue;
}
if (likely(!meta.portid)) {
netdev = dp->netdev;
} else {
struct nfp_net *nn;
nn = netdev_priv(dp->netdev);
netdev = nfp_app_repr_get(nn->app, meta.portid);
if (unlikely(!netdev)) {
nfp_net_rx_drop(dp, r_vec, rx_ring, rxbuf, skb);
continue;
}
nfp_repr_inc_rx_stats(netdev, pkt_len);
}
nfp_net_dma_unmap_rx(dp, rxbuf->dma_addr);
nfp_net_rx_give_one(dp, rx_ring, new_frag, new_dma_addr);
......@@ -1729,7 +1774,7 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
skb_set_hash(skb, meta.hash, meta.hash_type);
skb_record_rx_queue(skb, rx_ring->idx);
skb->protocol = eth_type_trans(skb, dp->netdev);
skb->protocol = eth_type_trans(skb, netdev);
nfp_net_rx_csum(dp, r_vec, rxd, &meta, skb);
......
......@@ -135,25 +135,24 @@ static u8 __iomem *nfp_net_map_area(struct nfp_cpp *cpp,
/**
* nfp_net_get_mac_addr() - Get the MAC address.
* @pf: NFP PF handle
* @nn: NFP Network structure
* @port: NFP port structure
* @id: NFP port id
*
* First try to get the MAC address from NSP ETH table. If that
* fails try HWInfo. As a last resort generate a random address.
*/
void
nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_port *port, unsigned int id)
{
struct nfp_eth_table_port *eth_port;
struct nfp_net_dp *dp = &nn->dp;
u8 mac_addr[ETH_ALEN];
const char *mac_str;
char name[32];
eth_port = __nfp_port_get_eth_port(nn->port);
eth_port = __nfp_port_get_eth_port(port);
if (eth_port) {
ether_addr_copy(dp->netdev->dev_addr, eth_port->mac_addr);
ether_addr_copy(dp->netdev->perm_addr, eth_port->mac_addr);
ether_addr_copy(port->netdev->dev_addr, eth_port->mac_addr);
ether_addr_copy(port->netdev->perm_addr, eth_port->mac_addr);
return;
}
......@@ -161,22 +160,22 @@ nfp_net_get_mac_addr(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
mac_str = nfp_hwinfo_lookup(pf->hwinfo, name);
if (!mac_str) {
dev_warn(dp->dev, "Can't lookup MAC address. Generate\n");
eth_hw_addr_random(dp->netdev);
nfp_warn(pf->cpp, "Can't lookup MAC address. Generate\n");
eth_hw_addr_random(port->netdev);
return;
}
if (sscanf(mac_str, "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
&mac_addr[0], &mac_addr[1], &mac_addr[2],
&mac_addr[3], &mac_addr[4], &mac_addr[5]) != 6) {
dev_warn(dp->dev,
"Can't parse MAC address (%s). Generate.\n", mac_str);
eth_hw_addr_random(dp->netdev);
nfp_warn(pf->cpp, "Can't parse MAC address (%s). Generate.\n",
mac_str);
eth_hw_addr_random(port->netdev);
return;
}
ether_addr_copy(dp->netdev->dev_addr, mac_addr);
ether_addr_copy(dp->netdev->perm_addr, mac_addr);
ether_addr_copy(port->netdev->dev_addr, mac_addr);
ether_addr_copy(port->netdev->perm_addr, mac_addr);
}
struct nfp_eth_table_port *
......@@ -235,10 +234,8 @@ nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
nfp_cppcore_pcie_unit(pf->cpp));
sym = nfp_rtsym_lookup(pf->rtbl, pf_symbol);
if (!sym) {
nfp_err(pf->cpp, "Failed to find PF symbol %s\n", pf_symbol);
if (!sym)
return (u8 __iomem *)ERR_PTR(-ENOENT);
}
if (sym->size < min_size) {
nfp_err(pf->cpp, "PF symbol %s too small\n", pf_symbol);
......@@ -486,6 +483,7 @@ nfp_net_pf_app_init(struct nfp_pf *pf, u8 __iomem *qc_bar, unsigned int stride)
NFP_PF_CSR_SLICE_SIZE,
&pf->ctrl_vnic_bar);
if (IS_ERR(ctrl_bar)) {
nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
err = PTR_ERR(ctrl_bar);
goto err_free;
}
......@@ -570,6 +568,80 @@ static void nfp_net_pf_app_stop(struct nfp_pf *pf)
nfp_net_pf_app_stop_ctrl(pf);
}
/* Release all CPP memory areas mapped by nfp_net_pci_map_mem().
 * The VF config and MAC stats symbols are optional and may never have
 * been mapped (their bar pointers stay NULL in that case), hence the
 * guards; the queue controller area and data vNIC BAR are mandatory.
 */
static void nfp_net_pci_unmap_mem(struct nfp_pf *pf)
{
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
	nfp_cpp_area_release_free(pf->qc_area);
	nfp_cpp_area_release_free(pf->data_vnic_bar);
}
/* Map all device memory areas the PF driver needs:
 *  - data vNIC control BAR (mandatory),
 *  - MAC stats symbol (optional),
 *  - VF config symbol (optional),
 *  - queue controller area (mandatory).
 * On failure every area mapped so far is released via the goto ladder.
 * Returns 0 on success or a negative errno.
 */
static int nfp_net_pci_map_mem(struct nfp_pf *pf)
{
	u32 ctrl_bar_sz;
	u8 __iomem *mem;
	int err;

	ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
	mem = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
				   ctrl_bar_sz, &pf->data_vnic_bar);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to find data vNIC memory symbol\n");
		err = PTR_ERR(mem);
		/* Missing symbol before FW load likely means the app FW
		 * isn't up yet - ask the core to retry the probe later.
		 */
		if (!pf->fw_loaded && err == -ENOENT)
			err = -EPROBE_DEFER;
		return err;
	}

	/* MAC stats are optional - only -ENOENT (symbol absent) is
	 * tolerated, any other error aborts the probe.
	 */
	pf->mac_stats_mem = nfp_net_pf_map_rtsym(pf, "net.macstats",
						 "_mac_stats",
						 NFP_MAC_STATS_SIZE *
						 (pf->eth_tbl->max_index + 1),
						 &pf->mac_stats_bar);
	if (IS_ERR(pf->mac_stats_mem)) {
		if (PTR_ERR(pf->mac_stats_mem) != -ENOENT) {
			err = PTR_ERR(pf->mac_stats_mem);
			goto err_unmap_ctrl;
		}
		pf->mac_stats_mem = NULL;
	}

	/* VF config BAR is likewise optional. */
	pf->vf_cfg_mem = nfp_net_pf_map_rtsym(pf, "net.vfcfg",
					      "_pf%d_net_vf_bar",
					      NFP_NET_CFG_BAR_SZ *
					      pf->limit_vfs, &pf->vf_cfg_bar);
	if (IS_ERR(pf->vf_cfg_mem)) {
		if (PTR_ERR(pf->vf_cfg_mem) != -ENOENT) {
			err = PTR_ERR(pf->vf_cfg_mem);
			goto err_unmap_mac_stats;
		}
		pf->vf_cfg_mem = NULL;
	}

	mem = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
			       NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
			       &pf->qc_area);
	if (IS_ERR(mem)) {
		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
		err = PTR_ERR(mem);
		goto err_unmap_vf_cfg;
	}

	return 0;

	/* Unwind in reverse order of mapping; optional areas may be NULL. */
err_unmap_vf_cfg:
	if (pf->vf_cfg_bar)
		nfp_cpp_area_release_free(pf->vf_cfg_bar);
err_unmap_mac_stats:
	if (pf->mac_stats_bar)
		nfp_cpp_area_release_free(pf->mac_stats_bar);
err_unmap_ctrl:
	nfp_cpp_area_release_free(pf->data_vnic_bar);
	return err;
}
static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
{
nfp_net_pf_app_stop(pf);
......@@ -577,11 +649,8 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_free_irqs(pf);
nfp_net_pf_app_clean(pf);
nfp_cpp_area_release_free(pf->qc_area);
nfp_cpp_area_release_free(pf->data_vnic_bar);
nfp_net_pci_unmap_mem(pf);
}
static int
......@@ -706,7 +775,6 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
{
struct nfp_net_fw_version fw_ver;
u8 __iomem *ctrl_bar, *qc_bar;
u32 ctrl_bar_sz;
int stride;
int err;
......@@ -725,14 +793,15 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
goto err_unlock;
}
ctrl_bar_sz = pf->max_data_vnics * NFP_PF_CSR_SLICE_SIZE;
ctrl_bar = nfp_net_pf_map_rtsym(pf, "net.ctrl", "_pf%d_net_bar0",
ctrl_bar_sz, &pf->data_vnic_bar);
if (IS_ERR(ctrl_bar)) {
err = PTR_ERR(ctrl_bar);
if (!pf->fw_loaded && err == -ENOENT)
err = -EPROBE_DEFER;
err = nfp_net_pci_map_mem(pf);
if (err)
goto err_unlock;
ctrl_bar = nfp_cpp_area_iomem(pf->data_vnic_bar);
qc_bar = nfp_cpp_area_iomem(pf->qc_area);
if (!ctrl_bar || !qc_bar) {
err = -EIO;
goto err_unmap;
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
......@@ -740,7 +809,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
nfp_err(pf->cpp, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
goto err_unmap;
}
/* Determine stride */
......@@ -757,23 +826,13 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
fw_ver.resv, fw_ver.class,
fw_ver.major, fw_ver.minor);
err = -EINVAL;
goto err_ctrl_unmap;
goto err_unmap;
}
}
/* Map queues */
qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
&pf->qc_area);
if (IS_ERR(qc_bar)) {
nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
err = PTR_ERR(qc_bar);
goto err_ctrl_unmap;
}
err = nfp_net_pf_app_init(pf, qc_bar, stride);
if (err)
goto err_unmap_qc;
goto err_unmap;
pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
......@@ -807,10 +866,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
err_clean_ddir:
nfp_net_debugfs_dir_clean(&pf->ddir);
nfp_net_pf_app_clean(pf);
err_unmap_qc:
nfp_cpp_area_release_free(pf->qc_area);
err_ctrl_unmap:
nfp_cpp_area_release_free(pf->data_vnic_bar);
err_unmap:
nfp_net_pci_unmap_mem(pf);
err_unlock:
mutex_unlock(&pf->lock);
cancel_work_sync(&pf->port_refresh_work);
......
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>
#include "nfpcore/nfp_cpp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_port.h"
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
int tx_status)
{
struct nfp_repr *repr = netdev_priv(netdev);
struct nfp_repr_pcpu_stats *stats;
if (unlikely(tx_status != NET_XMIT_SUCCESS &&
tx_status != NET_XMIT_CN)) {
this_cpu_inc(repr->stats->tx_drops);
return;
}
stats = this_cpu_ptr(repr->stats);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_end(&stats->syncp);
}
/* Account one received packet of @len bytes on a representor's
 * per-CPU stats block.
 */
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct nfp_repr *priv = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *pcpu = this_cpu_ptr(priv->stats);

	u64_stats_update_begin(&pcpu->syncp);
	pcpu->rx_packets += 1;
	pcpu->rx_bytes += len;
	u64_stats_update_end(&pcpu->syncp);
}
/* Fill @stats for a physical port representor from the device MAC
 * stats memory. Each port owns an NFP_MAC_STATS_SIZE-sized slice of
 * the mac_stats area, indexed by @phy_port.
 */
static void
nfp_repr_phy_port_get_stats64(const struct nfp_app *app, u8 phy_port,
			      struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem;

	mem = app->pf->mac_stats_mem + phy_port * NFP_MAC_STATS_SIZE;

	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the phys port.
	 */
	stats->tx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
	stats->tx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
	stats->tx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);

	stats->rx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
	stats->rx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
	stats->rx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);
}
/* Fill @stats for a VF representor from the VF config BAR. Each VF
 * owns an NFP_NET_CFG_BAR_SZ-sized slice, indexed by @vf.
 * NOTE(review): vf_cfg_mem may be NULL when the optional symbol was
 * absent - presumably callers only reach this when it is mapped;
 * confirm.
 */
static void
nfp_repr_vf_get_stats64(const struct nfp_app *app, u8 vf,
			struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem;

	mem = app->pf->vf_cfg_mem + vf * NFP_NET_CFG_BAR_SZ;

	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the VF.
	 */
	stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
}
/* Fill @stats for the PF representor from the data vNIC control BAR.
 * Only PF index 0 is supported - any other index leaves @stats
 * untouched. TX/RX are flipped here too (switch-port point of view),
 * matching the phys-port and VF variants.
 */
static void
nfp_repr_pf_get_stats64(const struct nfp_app *app, u8 pf,
			struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem;

	if (pf)
		return;

	mem = nfp_cpp_area_iomem(app->pf->data_vnic_bar);

	stats->tx_packets = readq(mem + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(mem + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(mem + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(mem + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(mem + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(mem + NFP_NET_CFG_STATS_TX_DISCARDS);
}
/**
 * nfp_repr_get_stats64() - Read device-side stats for a representor
 * @app:	APP handle
 * @type:	Type of the representor (phys port, PF or VF)
 * @port:	Index of the port within its type
 * @stats:	Stats structure to fill (left untouched for unknown types)
 */
void
nfp_repr_get_stats64(const struct nfp_app *app, enum nfp_repr_type type,
		     u8 port, struct rtnl_link_stats64 *stats)
{
	switch (type) {
	case NFP_REPR_TYPE_PHYS_PORT:
		nfp_repr_phy_port_get_stats64(app, port, stats);
		break;
	case NFP_REPR_TYPE_PF:
		nfp_repr_pf_get_stats64(app, port, stats);
		break;
	case NFP_REPR_TYPE_VF:
		nfp_repr_vf_get_stats64(app, port, stats);
		/* Explicit break - the original fell through into default
		 * (harmlessly, but it reads like a missing-break bug and
		 * trips -Wimplicit-fallthrough).
		 */
		break;
	default:
		break;
	}
}
/* Report whether the representor can provide the given offloaded-stats
 * attribute; only CPU-hit stats are supported.
 */
bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}
/* Sum the representor's per-CPU host-path counters into @stats.
 * The u64_stats fetch_begin/fetch_retry pair re-reads a CPU's snapshot
 * if a writer raced with us, so the five counters taken from one CPU
 * are mutually consistent. Always returns 0.
 */
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}
/* ndo_get_offload_stats handler: only CPU-hit stats are implemented,
 * every other attribute yields -EINVAL.
 */
int nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
			       void *stats)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return nfp_repr_get_host_stats64(dev, stats);

	return -EINVAL;
}
/* Representor TX path: attach the HW port mux metadata dst and requeue
 * the skb on the lower (PF) netdev, which uses the dst's port_id to
 * prepend the port metadata for the firmware.
 */
netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	/* Snapshot the length now - dev_queue_xmit() may consume the skb. */
	unsigned int len = skb->len;
	int ret;

	/* Replace any existing dst; hold a ref for the skb's reference. */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return ret;
}
/* Tear down a registered representor: unregister the netdev first so
 * no new TX can reference the dst, then drop the dst reference and
 * free the port.  Counterpart to nfp_repr_init().
 */
static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}
/* Representors transmit through a lower (PF) netdev, so their xmit and
 * address-list locks can be taken while the lower device's locks are
 * held. Give them dedicated lockdep classes so this nesting is not
 * flagged as recursive locking (standard pattern for stacked devices).
 */
static struct lock_class_key nfp_repr_netdev_xmit_lock_key;
static struct lock_class_key nfp_repr_netdev_addr_lock_key;

/* Per-TX-queue callback for netdev_for_each_tx_queue() below. */
static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}

static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &nfp_repr_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}
/**
 * nfp_repr_init() - Finish setting up and register a representor netdev
 * @app:	APP handle
 * @netdev:	Netdev allocated by nfp_repr_alloc()
 * @netdev_ops:	ndo callbacks for the representor
 * @cmsg_port_id: Firmware port id to stamp into TX metadata
 * @port:	Port structure; ownership is taken (freed by nfp_repr_clean())
 * @pf_netdev:	Lower device packets are actually transmitted on
 *
 * Return: 0 on success, -ENOMEM or register_netdev() error otherwise.
 * On failure the dst is released but @port is not freed here -
 * presumably the caller still owns it on the error path; confirm.
 */
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  const struct net_device_ops *netdev_ops, u32 cmsg_port_id,
		  struct nfp_port *port, struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	/* Zero-length metadata dst of the HW port mux flavour; its
	 * port_info tells the lower device where the packet belongs.
	 */
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = netdev_ops;

	err = register_netdev(netdev);
	if (err)
		goto err_clean;

	return 0;

err_clean:
	dst_release((struct dst_entry *)repr->dst);
	return err;
}
/* Free a representor's memory. The repr struct lives in the netdev's
 * private area, so free_netdev() must come last.
 */
static void nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}
/* Allocate a representor netdev with struct nfp_repr as its private
 * data, plus the per-CPU stats block. Returns NULL on allocation
 * failure; the result is not yet registered (see nfp_repr_init()).
 */
struct net_device *nfp_repr_alloc(struct nfp_app *app)
{
	struct net_device *netdev = alloc_etherdev(sizeof(struct nfp_repr));
	struct nfp_repr *repr;

	if (!netdev)
		return NULL;

	repr = netdev_priv(netdev);
	repr->netdev = netdev;
	repr->app = app;

	repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
	if (repr->stats)
		return netdev;

	/* per-CPU stats allocation failed - undo the netdev allocation */
	free_netdev(netdev);
	return NULL;
}
/* Fully dismantle one representor: log, unregister/release resources,
 * then free the memory.
 */
static void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	nfp_repr_free(repr);
}
/* Destroy every populated representor in @reprs and free the container
 * itself. Unpopulated slots (NULL entries) are skipped.
 */
void nfp_reprs_clean_and_free(struct nfp_reprs *reprs)
{
	unsigned int idx;

	for (idx = 0; idx < reprs->num_reprs; idx++) {
		struct net_device *netdev = reprs->reprs[idx];

		if (!netdev)
			continue;
		nfp_repr_clean_and_free(netdev_priv(netdev));
	}

	kfree(reprs);
}
/* Detach and destroy the representor set of a given type.
 * The set is first swapped out of the app (so no new lookups can find
 * it), then synchronize_rcu() waits for in-flight RCU readers before
 * the netdevs are torn down.
 */
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
				 enum nfp_repr_type type)
{
	struct nfp_reprs *reprs;

	reprs = nfp_app_reprs_set(app, type, NULL);
	if (!reprs)
		return;

	synchronize_rcu();
	nfp_reprs_clean_and_free(reprs);
}
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
struct nfp_reprs *reprs;
reprs = kzalloc(sizeof(*reprs) +
num_reprs * sizeof(struct net_device *), GFP_KERNEL);
if (!reprs)
return NULL;
reprs->num_reprs = num_reprs;
return reprs;
}
/*
* Copyright (C) 2017 Netronome Systems, Inc.
*
* This software is dual licensed under the GNU General License Version 2,
* June 1991 as shown in the file COPYING in the top-level directory of this
* source tree or the BSD 2-Clause License provided below. You have the
* option to license this software under the complete terms of either license.
*
* The BSD 2-Clause License:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef NFP_NET_REPR_H
#define NFP_NET_REPR_H
struct metadata_dst;
struct nfp_net;
struct nfp_port;
/**
* struct nfp_reprs - container for representor netdevs
* @num_reprs: Number of elements in reprs array
* @reprs: Array of representor netdevs
*/
struct nfp_reprs {
unsigned int num_reprs;
struct net_device *reprs[0];
};
/**
 * struct nfp_repr_pcpu_stats - per-CPU host-path counters of a representor
 * @rx_packets:	Received packets
 * @rx_bytes:	Received bytes
 * @tx_packets:	Transmitted packets
 * @tx_bytes:	Transmitted bytes
 * @tx_drops:	Packets dropped on transmit
 * @syncp:	u64_stats sync point protecting consistent reads of the
 *		64bit counters on 32bit hosts
 */
struct nfp_repr_pcpu_stats {
	u64 rx_packets;
	u64 rx_bytes;
	u64 tx_packets;
	u64 tx_bytes;
	u64 tx_drops;
	struct u64_stats_sync syncp;
};
/**
* struct nfp_repr - priv data for representor netdevs
* @netdev: Back pointer to netdev
* @dst: Destination for packet TX
* @port: Port of representor
* @app: APP handle
* @stats: Statistic of packets hitting CPU
*/
struct nfp_repr {
struct net_device *netdev;
struct metadata_dst *dst;
struct nfp_port *port;
struct nfp_app *app;
struct nfp_repr_pcpu_stats __percpu *stats;
};
/**
* enum nfp_repr_type - type of representor
* @NFP_REPR_TYPE_PHYS_PORT: external NIC port
* @NFP_REPR_TYPE_PF: physical function
* @NFP_REPR_TYPE_VF: virtual function
*/
enum nfp_repr_type {
NFP_REPR_TYPE_PHYS_PORT,
NFP_REPR_TYPE_PF,
NFP_REPR_TYPE_VF,
__NFP_REPR_TYPE_MAX,
};
#define NFP_REPR_TYPE_MAX (__NFP_REPR_TYPE_MAX - 1)
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len);
void
nfp_repr_get_stats64(const struct nfp_app *app, enum nfp_repr_type type,
u8 port, struct rtnl_link_stats64 *stats);
bool nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id);
int nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
void *stats);
netdev_tx_t nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev);
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
const struct net_device_ops *netdev_ops,
u32 cmsg_port_id, struct nfp_port *port,
struct net_device *pf_netdev);
struct net_device *nfp_repr_alloc(struct nfp_app *app);
void
nfp_reprs_clean_and_free(struct nfp_reprs *reprs);
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app,
enum nfp_repr_type type);
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs);
#endif /* NFP_NET_REPR_H */
......@@ -33,6 +33,7 @@
#include <linux/lockdep.h>
#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
......@@ -112,6 +113,30 @@ nfp_port_get_phys_port_name(struct net_device *netdev, char *name, size_t len)
return 0;
}
/**
 * nfp_port_init_phy_port() - Bind a port to its NSP ETH table entry
 * @pf:		NFP PF handle
 * @app:	APP handle (used for logging)
 * @port:	Port structure to initialize
 * @id:		ETH table index of the physical port
 *
 * Return: 0 on success, -EINVAL when no matching ETH table entry
 * exists. A pending config change is not an error: the port is marked
 * NFP_PORT_INVALID and 0 is returned.
 */
int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
			   struct nfp_port *port, unsigned int id)
{
	port->eth_id = id;
	port->eth_port = nfp_net_find_port(pf->eth_tbl, id);

	/* Check if vNIC has external port associated and cfg is OK */
	if (!port->eth_port) {
		nfp_err(app->cpp,
			"NSP port entries don't match vNICs (no entry for port #%d)\n",
			id);
		return -EINVAL;
	}
	if (port->eth_port->override_changed) {
		nfp_warn(app->cpp,
			 "Config changed for port #%d, reboot required before port will be operational\n",
			 id);
		port->type = NFP_PORT_INVALID;
		return 0;
	}

	return 0;
}
struct nfp_port *
nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
struct net_device *netdev)
......
......@@ -104,6 +104,9 @@ nfp_port_alloc(struct nfp_app *app, enum nfp_port_type type,
struct net_device *netdev);
void nfp_port_free(struct nfp_port *port);
int nfp_port_init_phy_port(struct nfp_pf *pf, struct nfp_app *app,
struct nfp_port *port, unsigned int id);
int nfp_net_refresh_eth_port(struct nfp_port *port);
void nfp_net_refresh_port_table(struct nfp_port *port);
int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
......@@ -111,4 +114,64 @@ int nfp_net_refresh_port_table_sync(struct nfp_pf *pf);
int nfp_devlink_port_register(struct nfp_app *app, struct nfp_port *port);
void nfp_devlink_port_unregister(struct nfp_port *port);
/**
* Mac stats (0x0000 - 0x0200)
* all counters are 64bit.
*/
#define NFP_MAC_STATS_BASE 0x0000
#define NFP_MAC_STATS_SIZE 0x0200
#define NFP_MAC_STATS_RX_IN_OCTETS (NFP_MAC_STATS_BASE + 0x000)
#define NFP_MAC_STATS_RX_FRAME_TOO_LONG_ERRORS (NFP_MAC_STATS_BASE + 0x010)
#define NFP_MAC_STATS_RX_RANGE_LENGTH_ERRORS (NFP_MAC_STATS_BASE + 0x018)
#define NFP_MAC_STATS_RX_VLAN_REVEIVE_OK (NFP_MAC_STATS_BASE + 0x020)
#define NFP_MAC_STATS_RX_IN_ERRORS (NFP_MAC_STATS_BASE + 0x028)
#define NFP_MAC_STATS_RX_IN_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x030)
#define NFP_MAC_STATS_RX_STATS_DROP_EVENTS (NFP_MAC_STATS_BASE + 0x038)
#define NFP_MAC_STATS_RX_ALIGNMENT_ERRORS (NFP_MAC_STATS_BASE + 0x040)
#define NFP_MAC_STATS_RX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x048)
#define NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK (NFP_MAC_STATS_BASE + 0x050)
#define NFP_MAC_STATS_RX_FRAME_CHECK_SEQUENCE_ERRORS (NFP_MAC_STATS_BASE + 0x058)
#define NFP_MAC_STATS_RX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x060)
#define NFP_MAC_STATS_RX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x068)
#define NFP_MAC_STATS_RX_STATS_PKTS (NFP_MAC_STATS_BASE + 0x070)
#define NFP_MAC_STATS_RX_STATS_UNDERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x078)
#define NFP_MAC_STATS_RX_STATS_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x080)
#define NFP_MAC_STATS_RX_STATS_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x088)
#define NFP_MAC_STATS_RX_STATS_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x090)
#define NFP_MAC_STATS_RX_STATS_PKTS_1024_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x098)
#define NFP_MAC_STATS_RX_STATS_JABBERS (NFP_MAC_STATS_BASE + 0x0a0)
#define NFP_MAC_STATS_RX_STATS_FRAGMENTS (NFP_MAC_STATS_BASE + 0x0a8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS2 (NFP_MAC_STATS_BASE + 0x0b0)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS3 (NFP_MAC_STATS_BASE + 0x0b8)
#define NFP_MAC_STATS_RX_STATS_PKTS_128_TO_255_OCTETS (NFP_MAC_STATS_BASE + 0x0c0)
#define NFP_MAC_STATS_RX_STATS_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x0c8)
#define NFP_MAC_STATS_RX_STATS_PKTS_1519_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x0d0)
#define NFP_MAC_STATS_RX_OVERSIZE_PKTS (NFP_MAC_STATS_BASE + 0x0d8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS0 (NFP_MAC_STATS_BASE + 0x0e0)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS1 (NFP_MAC_STATS_BASE + 0x0e8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS4 (NFP_MAC_STATS_BASE + 0x0f0)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS5 (NFP_MAC_STATS_BASE + 0x0f8)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS6 (NFP_MAC_STATS_BASE + 0x100)
#define NFP_MAC_STATS_RX_PAUSE_FRAMES_CLASS7 (NFP_MAC_STATS_BASE + 0x108)
#define NFP_MAC_STATS_RX_MAC_CTRL_FRAMES_RECEIVED (NFP_MAC_STATS_BASE + 0x110)
#define NFP_MAC_STATS_RX_MAC_HEAD_DROP (NFP_MAC_STATS_BASE + 0x118)
#define NFP_MAC_STATS_TX_QUEUE_DROP (NFP_MAC_STATS_BASE + 0x138)
#define NFP_MAC_STATS_TX_OUT_OCTETS (NFP_MAC_STATS_BASE + 0x140)
#define NFP_MAC_STATS_TX_VLAN_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x150)
#define NFP_MAC_STATS_TX_OUT_ERRORS (NFP_MAC_STATS_BASE + 0x158)
#define NFP_MAC_STATS_TX_BROADCAST_PKTS (NFP_MAC_STATS_BASE + 0x160)
#define NFP_MAC_STATS_TX_PKTS_64_OCTETS (NFP_MAC_STATS_BASE + 0x168)
#define NFP_MAC_STATS_TX_PKTS_256_TO_511_OCTETS (NFP_MAC_STATS_BASE + 0x170)
#define NFP_MAC_STATS_TX_PKTS_512_TO_1023_OCTETS (NFP_MAC_STATS_BASE + 0x178)
#define NFP_MAC_STATS_TX_PAUSE_MAC_CTRL_FRAMES (NFP_MAC_STATS_BASE + 0x180)
#define NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK (NFP_MAC_STATS_BASE + 0x188)
#define NFP_MAC_STATS_TX_UNICAST_PKTS (NFP_MAC_STATS_BASE + 0x190)
#define NFP_MAC_STATS_TX_MULTICAST_PKTS (NFP_MAC_STATS_BASE + 0x198)
#define NFP_MAC_STATS_TX_PKTS_65_TO_127_OCTETS (NFP_MAC_STATS_BASE + 0x1a0)
#define NFP_MAC_STATS_TX_PKTS_127_TO_512_OCTETS (NFP_MAC_STATS_BASE + 0x1a8)
#define NFP_MAC_STATS_TX_PKTS_128_TO_1518_OCTETS (NFP_MAC_STATS_BASE + 0x1b0)
#define NFP_MAC_STATS_TX_PKTS_1518_TO_MAX_OCTETS (NFP_MAC_STATS_BASE + 0x1b8)
#endif
......@@ -76,6 +76,7 @@ enum nfp_eth_aneg {
/**
* struct nfp_eth_table - ETH table information
* @count: number of table entries
* @max_index: max of @index fields of all @ports
* @ports: table of ports
*
* @eth_index: port index according to legacy ethX numbering
......@@ -101,6 +102,7 @@ enum nfp_eth_aneg {
*/
struct nfp_eth_table {
unsigned int count;
unsigned int max_index;
struct nfp_eth_table_port {
unsigned int eth_index;
unsigned int index;
......
......@@ -190,7 +190,9 @@ nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
{
unsigned int i, j;
for (i = 0; i < table->count; i++)
for (i = 0; i < table->count; i++) {
table->max_index = max(table->max_index, table->ports[i].index);
for (j = 0; j < table->count; j++) {
if (table->ports[i].label_port !=
table->ports[j].label_port)
......@@ -208,6 +210,7 @@ nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table)
table->ports[i].is_split = true;
}
}
}
static void
......
......@@ -5,10 +5,22 @@
#include <net/ip_tunnels.h>
#include <net/dst.h>
/* Discriminator for the union carried by struct metadata_dst. */
enum metadata_type {
	METADATA_IP_TUNNEL,
	METADATA_HW_PORT_MUX,
};

/* Egress mux info for METADATA_HW_PORT_MUX dsts: the lower device to
 * transmit on and the device-specific port id associated with the skb.
 */
struct hw_port_info {
	struct net_device *lower_dev;
	u32 port_id;
};

/* Metadata dst: a dst_entry carrying out-of-band per-skb metadata.
 * @type selects which member of @u is valid.
 */
struct metadata_dst {
	struct dst_entry		dst;
	enum metadata_type		type;
	union {
		struct ip_tunnel_info	tun_info;
		struct hw_port_info	port_info;
	} u;
};
......@@ -27,7 +39,7 @@ static inline struct ip_tunnel_info *skb_tunnel_info(struct sk_buff *skb)
struct metadata_dst *md_dst = skb_metadata_dst(skb);
struct dst_entry *dst;
if (md_dst)
if (md_dst && md_dst->type == METADATA_IP_TUNNEL)
return &md_dst->u.tun_info;
dst = skb_dst(skb);
......@@ -55,22 +67,33 @@ static inline int skb_metadata_dst_cmp(const struct sk_buff *skb_a,
a = (const struct metadata_dst *) skb_dst(skb_a);
b = (const struct metadata_dst *) skb_dst(skb_b);
if (!a != !b || a->u.tun_info.options_len != b->u.tun_info.options_len)
if (!a != !b || a->type != b->type)
return 1;
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) + a->u.tun_info.options_len);
switch (a->type) {
case METADATA_HW_PORT_MUX:
return memcmp(&a->u.port_info, &b->u.port_info,
sizeof(a->u.port_info));
case METADATA_IP_TUNNEL:
return memcmp(&a->u.tun_info, &b->u.tun_info,
sizeof(a->u.tun_info) +
a->u.tun_info.options_len);
default:
return 1;
}
}
void metadata_dst_free(struct metadata_dst *);
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags);
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags);
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
gfp_t flags);
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags);
static inline struct metadata_dst *tun_rx_dst(int md_size)
{
struct metadata_dst *tun_dst;
tun_dst = metadata_dst_alloc(md_size, GFP_ATOMIC);
tun_dst = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
if (!tun_dst)
return NULL;
......@@ -85,11 +108,11 @@ static inline struct metadata_dst *tun_dst_unclone(struct sk_buff *skb)
int md_size;
struct metadata_dst *new_md;
if (!md_dst)
if (!md_dst || md_dst->type != METADATA_IP_TUNNEL)
return ERR_PTR(-EINVAL);
md_size = md_dst->u.tun_info.options_len;
new_md = metadata_dst_alloc(md_size, GFP_ATOMIC);
new_md = metadata_dst_alloc(md_size, METADATA_IP_TUNNEL, GFP_ATOMIC);
if (!new_md)
return ERR_PTR(-ENOMEM);
......
......@@ -264,7 +264,9 @@ static int dst_md_discard(struct sk_buff *skb)
return 0;
}
static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
static void __metadata_dst_init(struct metadata_dst *md_dst,
enum metadata_type type, u8 optslen)
{
struct dst_entry *dst;
......@@ -276,9 +278,11 @@ static void __metadata_dst_init(struct metadata_dst *md_dst, u8 optslen)
dst->output = dst_md_discard_out;
memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
md_dst->type = type;
}
struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
gfp_t flags)
{
struct metadata_dst *md_dst;
......@@ -286,7 +290,7 @@ struct metadata_dst *metadata_dst_alloc(u8 optslen, gfp_t flags)
if (!md_dst)
return NULL;
__metadata_dst_init(md_dst, optslen);
__metadata_dst_init(md_dst, type, optslen);
return md_dst;
}
......@@ -300,7 +304,8 @@ void metadata_dst_free(struct metadata_dst *md_dst)
kfree(md_dst);
}
struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
int cpu;
struct metadata_dst __percpu *md_dst;
......@@ -311,7 +316,7 @@ struct metadata_dst __percpu *metadata_dst_alloc_percpu(u8 optslen, gfp_t flags)
return NULL;
for_each_possible_cpu(cpu)
__metadata_dst_init(per_cpu_ptr(md_dst, cpu), optslen);
__metadata_dst_init(per_cpu_ptr(md_dst, cpu), type, optslen);
return md_dst;
}
......
......@@ -2565,6 +2565,7 @@ bpf_get_skb_set_tunnel_proto(enum bpf_func_id which)
* that is holding verifier mutex.
*/
md_dst = metadata_dst_alloc_percpu(IP_TUNNEL_OPTS_MAX,
METADATA_IP_TUNNEL,
GFP_KERNEL);
if (!md_dst)
return NULL;
......
......@@ -134,10 +134,12 @@ struct metadata_dst *iptunnel_metadata_reply(struct metadata_dst *md,
struct metadata_dst *res;
struct ip_tunnel_info *dst, *src;
if (!md || md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
if (!md || md->type != METADATA_IP_TUNNEL ||
md->u.tun_info.mode & IP_TUNNEL_INFO_TX)
return NULL;
res = metadata_dst_alloc(0, flags);
res = metadata_dst_alloc(0, METADATA_IP_TUNNEL, flags);
if (!res)
return NULL;
......
......@@ -2202,7 +2202,9 @@ static int validate_and_copy_set_tun(const struct nlattr *attr,
if (start < 0)
return start;
tun_dst = metadata_dst_alloc(key.tun_opts_len, GFP_KERNEL);
tun_dst = metadata_dst_alloc(key.tun_opts_len, METADATA_IP_TUNNEL,
GFP_KERNEL);
if (!tun_dst)
return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment