Commit bae75b66 authored by David S. Miller

Merge branch 'bnxt_en-updates'

Michael Chan says:

====================
bnxt_en: Updates for net-next.

This series includes updating the firmware interface, adding
methods to get and set VEPA/VEB bridge modes, some minor DCBX and ETS
refinements, and 3 patches from Sathya Perla to implement initial
VF representors for SRIOV switching.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents e42e24c3 c124a62f
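For reference, once applied, both new user-visible controls are exercised with standard iproute2 tooling (assuming the bridge and devlink utilities are available): the bridge mode via "bridge link set dev <pf> hwmode vepa" (or veb), and the VF representors via "devlink dev eswitch set pci/<bdf> mode switchdev".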
......@@ -193,6 +193,7 @@ config SYSTEMPORT
config BNXT
tristate "Broadcom NetXtreme-C/E support"
depends on PCI
depends on MAY_USE_DEVLINK
select FW_LOADER
select LIBCRC32C
---help---
......
obj-$(CONFIG_BNXT) += bnxt_en.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o
bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_vfr.o
......@@ -33,6 +33,7 @@
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
......@@ -56,6 +57,7 @@
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#define BNXT_TX_TIMEOUT (5 * HZ)
......@@ -243,6 +245,16 @@ const u16 bnxt_lhint_arr[] = {
TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
struct metadata_dst *md_dst = skb_metadata_dst(skb);
if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
return 0;
return md_dst->u.port_info.port_id;
}
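For context, the cfa_action consumed above is produced on the VF representor's transmit path (added in bnxt_vfr.c later in this series). A minimal sketch of that producer side, under an illustrative function name (the real code is bnxt_vf_rep_xmit() below):

static netdev_tx_t vfr_xmit_sketch(struct bnxt_vf_rep *vf_rep,
				   struct sk_buff *skb)
{
	/* Attach a METADATA_HW_PORT_MUX dst whose port_id carries the
	 * representor's tx_cfa_action, then hand the skb to the PF
	 * netdev, where bnxt_xmit_get_cfa_action() reads it back.
	 */
	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)vf_rep->dst);
	skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
	skb->dev = vf_rep->dst->u.port_info.lower_dev;	/* the PF netdev */
	return dev_queue_xmit(skb);
}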
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bnxt *bp = netdev_priv(dev);
......@@ -287,7 +299,7 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_buf->nr_frags = last_frag;
vlan_tag_flags = 0;
cfa_action = 0;
cfa_action = bnxt_xmit_get_cfa_action(skb);
if (skb_vlan_tag_present(skb)) {
vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
skb_vlan_tag_get(skb);
......@@ -322,7 +334,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
tx_push1->tx_bd_hsize_lflags = 0;
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
tx_push1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
end = pdata + length;
end = PTR_ALIGN(end, 8) - 1;
......@@ -427,7 +440,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
txbd1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
txbd1->tx_bd_cfa_action =
cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
for (i = 0; i < last_frag; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
......@@ -1032,7 +1046,10 @@ static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
bnxt_sched_reset(bp, rxr);
return;
}
/* Store cfa_code in tpa_info to use in tpa_end
* completion processing.
*/
tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
prod_rx_buf->data = tpa_info->data;
prod_rx_buf->data_ptr = tpa_info->data_ptr;
......@@ -1267,6 +1284,17 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
return skb;
}
/* Given the cfa_code of a received packet, determine which
 * netdev (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);
/* if vf-rep dev is NULL, the packet must belong to the PF */
return dev ? dev : bp->dev;
}
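(The cfa_code -> vf_idx map consulted by bnxt_get_vf_rep() is populated at VF-rep creation time from the rx_cfa_code handles returned by HWRM_CFA_VFR_ALLOC; see bnxt_vfr.c later in this series.)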
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
struct bnxt_napi *bnapi,
u32 *raw_cons,
......@@ -1360,7 +1388,9 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return NULL;
}
}
skb->protocol = eth_type_trans(skb, bp->dev);
skb->protocol =
eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));
if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);
......@@ -1387,6 +1417,18 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
return skb;
}
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
struct sk_buff *skb)
{
if (skb->dev != bp->dev) {
/* this packet belongs to a vf-rep */
bnxt_vf_rep_rx(bp, skb);
return;
}
skb_record_rx_queue(skb, bnapi->index);
napi_gro_receive(&bnapi->napi, skb);
}
/* returns the following:
* 1 - 1 packet successfully received
* 0 - successful TPA_START, packet not completed yet
......@@ -1403,7 +1445,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
struct rx_cmp *rxcmp;
struct rx_cmp_ext *rxcmp1;
u32 tmp_raw_cons = *raw_cons;
u16 cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
struct bnxt_sw_rx_bd *rx_buf;
unsigned int len;
u8 *data_ptr, agg_bufs, cmp_type;
......@@ -1445,8 +1487,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
napi_gro_receive(&bnapi->napi, skb);
bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
}
*event |= BNXT_RX_EVENT;
......@@ -1535,7 +1576,8 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
}
skb->protocol = eth_type_trans(skb, dev);
cfa_code = RX_CMP_CFA_CODE(rxcmp1);
skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));
if ((rxcmp1->rx_cmp_flags2 &
cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
......@@ -1560,8 +1602,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
}
skb_record_rx_queue(skb, bnapi->index);
napi_gro_receive(&bnapi->napi, skb);
bnxt_deliver_skb(bp, bnapi, skb);
rc = 1;
next_rx:
......@@ -4577,6 +4618,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
struct hwrm_func_qcfg_input req = {0};
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
u16 flags;
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
......@@ -4593,15 +4635,15 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
}
#endif
if (BNXT_PF(bp)) {
u16 flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED))
bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)
bp->flags |= BNXT_FLAG_MULTI_HOST;
flags = le16_to_cpu(resp->flags);
if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;
if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
bp->flags |= BNXT_FLAG_FW_DCBX_AGENT;
}
if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
bp->flags |= BNXT_FLAG_MULTI_HOST;
switch (resp->port_partition_type) {
case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
......@@ -4610,6 +4652,13 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
bp->port_partition_type = resp->port_partition_type;
break;
}
if (bp->hwrm_spec_code < 0x10707 ||
resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
bp->br_mode = BRIDGE_MODE_VEB;
else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
bp->br_mode = BRIDGE_MODE_VEPA;
else
bp->br_mode = BRIDGE_MODE_UNDEF;
func_qcfg_exit:
mutex_unlock(&bp->hwrm_cmd_lock);
......@@ -4911,6 +4960,26 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
}
}
static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
{
struct hwrm_func_cfg_input req = {0};
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
if (br_mode == BRIDGE_MODE_VEB)
req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
else if (br_mode == BRIDGE_MODE_VEPA)
req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
else
return -EINVAL;
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
rc = -EIO;
return rc;
}
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
......@@ -5646,7 +5715,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
if (rc)
goto hwrm_phy_qcaps_exit;
if (resp->eee_supported & PORT_PHY_QCAPS_RESP_EEE_SUPPORTED) {
if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
struct ethtool_eee *eee = &bp->eee;
u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
......@@ -5686,13 +5755,15 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
link_info->phy_link_status = resp->link;
link_info->duplex = resp->duplex;
link_info->duplex = resp->duplex_cfg;
if (bp->hwrm_spec_code >= 0x10800)
link_info->duplex = resp->duplex_state;
link_info->pause = resp->pause;
link_info->auto_mode = resp->auto_mode;
link_info->auto_pause_setting = resp->auto_pause;
link_info->lp_pause = resp->link_partner_adv_pause;
link_info->force_pause_setting = resp->force_pause;
link_info->duplex_setting = resp->duplex;
link_info->duplex_setting = resp->duplex_cfg;
if (link_info->phy_link_status == BNXT_LINK_LINK)
link_info->link_speed = le16_to_cpu(resp->link_speed);
else
......@@ -6214,6 +6285,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
/* Poll link status and check for SFP+ module status */
bnxt_get_port_module_status(bp);
/* VF-reps may need to be re-opened after the PF is re-opened */
if (BNXT_PF(bp))
bnxt_vf_reps_open(bp);
return 0;
open_err:
......@@ -6302,6 +6376,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
if (rc)
netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
}
/* Close the VF-reps before closing PF */
if (BNXT_PF(bp))
bnxt_vf_reps_close(bp);
#endif
/* Change device state to avoid TX queue wake up's */
bnxt_tx_disable(bp);
......@@ -6813,7 +6891,8 @@ static void bnxt_timer(unsigned long data)
if (atomic_read(&bp->intr_sem) != 0)
goto bnxt_restart_timer;
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS)) {
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
schedule_work(&bp->sp_task);
}
......@@ -7422,6 +7501,106 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
schedule_work(&bp->sp_task);
}
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u32 filter_mask,
int nlflags)
{
struct bnxt *bp = netdev_priv(dev);
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
nlflags, filter_mask, NULL);
}
static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
u16 flags)
{
struct bnxt *bp = netdev_priv(dev);
struct nlattr *attr, *br_spec;
int rem, rc = 0;
if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
return -EOPNOTSUPP;
br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
if (!br_spec)
return -EINVAL;
nla_for_each_nested(attr, br_spec, rem) {
u16 mode;
if (nla_type(attr) != IFLA_BRIDGE_MODE)
continue;
if (nla_len(attr) < sizeof(mode))
return -EINVAL;
mode = nla_get_u16(attr);
if (mode == bp->br_mode)
break;
rc = bnxt_hwrm_set_br_mode(bp, mode);
if (!rc)
bp->br_mode = mode;
break;
}
return rc;
}
static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
size_t len)
{
struct bnxt *bp = netdev_priv(dev);
int rc;
/* The PF and its VF-reps only support the switchdev framework */
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
/* The switch-id that the PF belongs to is exported by
 * the switchdev ndo. This name merely distinguishes the PF
 * port from the vf-rep ports.
 */
rc = snprintf(buf, len, "pf%d", bp->pf.port_id);
if (rc >= len)
return -EOPNOTSUPP;
return 0;
}
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
{
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return -EOPNOTSUPP;
/* The PF and its VF-reps only support the switchdev framework */
if (!BNXT_PF(bp))
return -EOPNOTSUPP;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
/* In SRIOV each PF-pool (PF + child VFs) serves as a
 * switching domain, so the PF's perm mac-addr can be used
 * as the unique parent-id
 */
attr->u.ppid.id_len = ETH_ALEN;
ether_addr_copy(attr->u.ppid.id, bp->pf.mac_addr);
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int bnxt_swdev_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
return bnxt_port_attr_get(netdev_priv(dev), attr);
}
static const struct switchdev_ops bnxt_switchdev_ops = {
.switchdev_port_attr_get = bnxt_swdev_port_attr_get
};
static const struct net_device_ops bnxt_netdev_ops = {
.ndo_open = bnxt_open,
.ndo_start_xmit = bnxt_start_xmit,
......@@ -7453,6 +7632,9 @@ static const struct net_device_ops bnxt_netdev_ops = {
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
.ndo_xdp = bnxt_xdp,
.ndo_bridge_getlink = bnxt_bridge_getlink,
.ndo_bridge_setlink = bnxt_bridge_setlink,
.ndo_get_phys_port_name = bnxt_get_phys_port_name
};
static void bnxt_remove_one(struct pci_dev *pdev)
......@@ -7460,8 +7642,10 @@ static void bnxt_remove_one(struct pci_dev *pdev)
struct net_device *dev = pci_get_drvdata(pdev);
struct bnxt *bp = netdev_priv(dev);
if (BNXT_PF(bp))
if (BNXT_PF(bp)) {
bnxt_sriov_disable(bp);
bnxt_dl_unregister(bp);
}
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
......@@ -7710,6 +7894,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &bnxt_netdev_ops;
dev->watchdog_timeo = BNXT_TX_TIMEOUT;
dev->ethtool_ops = &bnxt_ethtool_ops;
dev->switchdev_ops = &bnxt_switchdev_ops;
pci_set_drvdata(pdev, dev);
rc = bnxt_alloc_hwrm_resources(bp);
......@@ -7764,6 +7949,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_BNXT_SRIOV
init_waitqueue_head(&bp->sriov_cfg_wait);
mutex_init(&bp->sriov_lock);
#endif
bp->gro_func = bnxt_gro_func_5730x;
if (BNXT_CHIP_P4_PLUS(bp))
......@@ -7855,6 +8041,9 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
goto init_err_clr_int;
if (BNXT_PF(bp))
bnxt_dl_register(bp);
netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
board_info[ent->driver_data].name,
(long)pci_resource_start(pdev, 0), dev->dev_addr);
......
......@@ -12,13 +12,16 @@
#define BNXT_H
#define DRV_MODULE_NAME "bnxt_en"
#define DRV_MODULE_VERSION "1.7.0"
#define DRV_MODULE_VERSION "1.8.0"
#define DRV_VER_MAJ 1
#define DRV_VER_MIN 7
#define DRV_VER_MIN 8
#define DRV_VER_UPD 0
#include <linux/interrupt.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>
#include <net/switchdev.h>
struct tx_bd {
__le32 tx_bd_len_flags_type;
......@@ -242,6 +245,10 @@ struct rx_cmp_ext {
((le32_to_cpu((rxcmp1)->rx_cmp_flags2) & \
RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3)
#define RX_CMP_CFA_CODE(rxcmpl1) \
((le32_to_cpu((rxcmpl1)->rx_cmp_cfa_code_errors_v2) & \
RX_CMPL_CFA_CODE_MASK) >> RX_CMPL_CFA_CODE_SFT)
struct rx_agg_cmp {
__le32 rx_agg_cmp_len_flags_type;
#define RX_AGG_CMP_TYPE (0x3f << 0)
......@@ -311,6 +318,10 @@ struct rx_tpa_start_cmp_ext {
__le32 rx_tpa_start_cmp_hdr_info;
};
#define TPA_START_CFA_CODE(rx_tpa_start) \
((le32_to_cpu((rx_tpa_start)->rx_tpa_start_cmp_cfa_code_v2) & \
RX_TPA_START_CMP_CFA_CODE) >> RX_TPA_START_CMPL_CFA_CODE_SHIFT)
struct rx_tpa_end_cmp {
__le32 rx_tpa_end_cmp_len_flags_type;
#define RX_TPA_END_CMP_TYPE (0x3f << 0)
......@@ -618,6 +629,8 @@ struct bnxt_tpa_info {
#define BNXT_TPA_OUTER_L3_OFF(hdr_info) \
((hdr_info) & 0x1ff)
u16 cfa_code; /* cfa_code in TPA start compl */
};
struct bnxt_rx_ring_info {
......@@ -825,8 +838,8 @@ struct bnxt_link_info {
u8 loop_back;
u8 link_up;
u8 duplex;
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_FULL
#define BNXT_LINK_DUPLEX_HALF PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF
#define BNXT_LINK_DUPLEX_FULL PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL
u8 pause;
#define BNXT_LINK_PAUSE_TX PORT_PHY_QCFG_RESP_PAUSE_TX
#define BNXT_LINK_PAUSE_RX PORT_PHY_QCFG_RESP_PAUSE_RX
......@@ -928,6 +941,24 @@ struct bnxt_test_info {
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
struct bnxt_vf_rep_stats {
u64 packets;
u64 bytes;
u64 dropped;
};
struct bnxt_vf_rep {
struct bnxt *bp;
struct net_device *dev;
struct metadata_dst *dst;
u16 vf_idx;
u16 tx_cfa_action;
u16 rx_cfa_code;
struct bnxt_vf_rep_stats rx_stats;
struct bnxt_vf_rep_stats tx_stats;
};
struct bnxt {
void __iomem *bar0;
void __iomem *bar1;
......@@ -1027,6 +1058,7 @@ struct bnxt {
#define BNXT_FLAG_MULTI_HOST 0x100000
#define BNXT_FLAG_SHORT_CMD 0x200000
#define BNXT_FLAG_DOUBLE_DB 0x400000
#define BNXT_FLAG_FW_DCBX_AGENT 0x800000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
......@@ -1164,6 +1196,7 @@ struct bnxt {
u8 nge_port_cnt;
__le16 nge_fw_dst_port_id;
u8 port_partition_type;
u16 br_mode;
u16 rx_coal_ticks;
u16 rx_coal_ticks_irq;
......@@ -1206,6 +1239,12 @@ struct bnxt {
wait_queue_head_t sriov_cfg_wait;
bool sriov_cfg;
#define BNXT_SRIOV_CFG_WAIT_TMO msecs_to_jiffies(10000)
/* lock to protect VF-rep creation/cleanup via
* multiple paths such as ->sriov_configure() and
* devlink ->eswitch_mode_set()
*/
struct mutex sriov_lock;
#endif
#define BNXT_NTP_FLTR_MAX_FLTR 4096
......@@ -1232,6 +1271,12 @@ struct bnxt {
struct bnxt_led_info leds[BNXT_MAX_LED];
struct bpf_prog *xdp_prog;
/* devlink interface and vf-rep structs */
struct devlink *dl;
enum devlink_eswitch_mode eswitch_mode;
struct bnxt_vf_rep **vf_reps; /* array of vf-rep ptrs */
u16 *cfa_code_map; /* cfa_code -> vf_idx map */
};
#define BNXT_RX_STATS_OFFSET(counter) \
......@@ -1306,4 +1351,5 @@ int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
int bnxt_setup_mq_tc(struct net_device *dev, u8 tc);
int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
void bnxt_restore_pf_fw_resources(struct bnxt *bp);
int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr);
#endif
......@@ -93,6 +93,12 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
cos2bw.tsa =
QUEUE_COS2BW_QCFG_RESP_QUEUE_ID0_TSA_ASSIGN_ETS;
cos2bw.bw_weight = ets->tc_tx_bw[i];
/* older firmware requires min_bw to be set to the same
 * weight value, in units of 0.01 percent (hence the * 100
 * and the PERCENT1_100 unit flag).
 */
cos2bw.min_bw =
cpu_to_le32((ets->tc_tx_bw[i] * 100) |
BW_VALUE_UNIT_PERCENT1_100);
}
memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4);
if (i == 0) {
......@@ -549,13 +555,18 @@ static u8 bnxt_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
struct bnxt *bp = netdev_priv(dev);
/* only support IEEE */
if ((mode & DCB_CAP_DCBX_VER_CEE) || !(mode & DCB_CAP_DCBX_VER_IEEE))
/* All firmware DCBX settings are set in NVRAM */
if (bp->dcbx_cap & DCB_CAP_DCBX_LLD_MANAGED)
return 1;
if (mode & DCB_CAP_DCBX_HOST) {
if (BNXT_VF(bp) || (bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
return 1;
/* only support IEEE */
if ((mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE))
return 1;
}
if (mode == bp->dcbx_cap)
......@@ -584,7 +595,7 @@ void bnxt_dcb_init(struct bnxt *bp)
bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
else
else if (bp->flags & BNXT_FLAG_FW_DCBX_AGENT)
bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;
bp->dev->dcbnl_ops = &dcbnl_ops;
}
......
......@@ -26,6 +26,7 @@ struct bnxt_cos2bw_cfg {
u8 queue_id;
__le32 min_bw;
__le32 max_bw;
#define BW_VALUE_UNIT_PERCENT1_100 (0x1UL << 29)
u8 tsa;
u8 pri_lvl;
u8 bw_weight;
......
......@@ -86,9 +86,11 @@ static int bnxt_set_coalesce(struct net_device *dev,
if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
u32 stats_ticks = coal->stats_block_coalesce_usecs;
stats_ticks = clamp_t(u32, stats_ticks,
BNXT_MIN_STATS_COAL_TICKS,
BNXT_MAX_STATS_COAL_TICKS);
/* Allow 0, which means disable. */
if (stats_ticks)
stats_ticks = clamp_t(u32, stats_ticks,
BNXT_MIN_STATS_COAL_TICKS,
BNXT_MAX_STATS_COAL_TICKS);
stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
bp->stats_coal_ticks = stats_ticks;
update_stats = true;
......@@ -198,19 +200,23 @@ static const struct {
#define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
static int bnxt_get_num_stats(struct bnxt *bp)
{
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
return num_stats;
}
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
struct bnxt *bp = netdev_priv(dev);
switch (sset) {
case ETH_SS_STATS: {
int num_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
if (bp->flags & BNXT_FLAG_PORT_STATS)
num_stats += BNXT_NUM_PORT_STATS;
return num_stats;
}
case ETH_SS_STATS:
return bnxt_get_num_stats(bp);
case ETH_SS_TEST:
if (!bp->num_tests)
return -EOPNOTSUPP;
......@@ -225,11 +231,8 @@ static void bnxt_get_ethtool_stats(struct net_device *dev,
{
u32 i, j = 0;
struct bnxt *bp = netdev_priv(dev);
u32 buf_size = sizeof(struct ctx_hw_stats) * bp->cp_nr_rings;
u32 stat_fields = sizeof(struct ctx_hw_stats) / 8;
memset(buf, 0, buf_size);
if (!bp->bnapi)
return;
......@@ -835,7 +838,7 @@ static void bnxt_get_drvinfo(struct net_device *dev,
strlcpy(info->fw_version, bp->fw_ver_str,
sizeof(info->fw_version));
strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
info->n_stats = BNXT_NUM_STATS * bp->cp_nr_rings;
info->n_stats = bnxt_get_num_stats(bp);
info->testinfo_len = bp->num_tests;
/* TODO CHIMP_FW: eeprom dump details */
info->eedump_len = 0;
......
......@@ -11,14 +11,14 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
/* HSI and HWRM Specification 1.7.6 */
/* HSI and HWRM Specification 1.8.0 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 7
#define HWRM_VERSION_UPDATE 6
#define HWRM_VERSION_MINOR 8
#define HWRM_VERSION_UPDATE 0
#define HWRM_VERSION_RSVD 2 /* non-zero means beta version */
#define HWRM_VERSION_RSVD 0 /* non-zero means beta version */
#define HWRM_VERSION_STR "1.7.6.2"
#define HWRM_VERSION_STR "1.8.0.0"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it the size of the field if needed.
......@@ -813,7 +813,7 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED 0x4UL
#define FUNC_QCFG_RESP_FLAGS_STD_TX_RING_MODE_ENABLED 0x8UL
#define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
#define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL
u8 mac_address[6];
__le16 pci_id;
__le16 alloc_rsscos_ctx;
......@@ -835,9 +835,8 @@ struct hwrm_func_qcfg_output {
u8 port_pf_cnt;
#define FUNC_QCFG_RESP_PORT_PF_CNT_UNAVAIL 0x0UL
__le16 dflt_vnic_id;
u8 host_cnt;
#define FUNC_QCFG_RESP_HOST_CNT_UNAVAIL 0x0UL
u8 unused_0;
u8 unused_1;
__le32 min_bw;
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_MASK 0xfffffffUL
#define FUNC_QCFG_RESP_MIN_BW_BW_VALUE_SFT 0
......@@ -874,12 +873,56 @@ struct hwrm_func_qcfg_output {
#define FUNC_QCFG_RESP_EVB_MODE_NO_EVB 0x0UL
#define FUNC_QCFG_RESP_EVB_MODE_VEB 0x1UL
#define FUNC_QCFG_RESP_EVB_MODE_VEPA 0x2UL
u8 unused_1;
u8 unused_2;
__le16 alloc_vfs;
__le32 alloc_mcast_filters;
__le32 alloc_hw_ring_grps;
__le16 alloc_sp_tx_rings;
u8 unused_3;
u8 valid;
};
/* hwrm_func_vlan_cfg */
/* Input (48 bytes) */
struct hwrm_func_vlan_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 fid;
u8 unused_0;
u8 unused_1;
__le32 enables;
#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_VID 0x1UL
#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_VID 0x2UL
#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_PCP 0x4UL
#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_PCP 0x8UL
#define FUNC_VLAN_CFG_REQ_ENABLES_STAG_TPID 0x10UL
#define FUNC_VLAN_CFG_REQ_ENABLES_CTAG_TPID 0x20UL
__le16 stag_vid;
u8 stag_pcp;
u8 unused_2;
__be16 stag_tpid;
__le16 ctag_vid;
u8 ctag_pcp;
u8 unused_3;
__be16 ctag_tpid;
__le32 rsvd1;
__le32 rsvd2;
__le32 unused_4;
};
/* Output (16 bytes) */
struct hwrm_func_vlan_cfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
......@@ -902,6 +945,7 @@ struct hwrm_func_cfg_input {
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_ENABLE 0x200UL
#define FUNC_CFG_REQ_FLAGS_STD_TX_RING_MODE_DISABLE 0x400UL
#define FUNC_CFG_REQ_FLAGS_VIRT_MAC_PERSIST 0x800UL
#define FUNC_CFG_REQ_FLAGS_NO_AUTOCLEAR_STATISTIC 0x1000UL
__le32 enables;
#define FUNC_CFG_REQ_ENABLES_MTU 0x1UL
#define FUNC_CFG_REQ_ENABLES_MRU 0x2UL
......@@ -1456,9 +1500,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_LINK_SPEED_50GB 0x1f4UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_100GB 0x3e8UL
#define PORT_PHY_QCFG_RESP_LINK_SPEED_10MB 0xffffUL
u8 duplex;
#define PORT_PHY_QCFG_RESP_DUPLEX_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_FULL 0x1UL
u8 duplex_cfg;
#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL 0x1UL
u8 pause;
#define PORT_PHY_QCFG_RESP_PAUSE_TX 0x1UL
#define PORT_PHY_QCFG_RESP_PAUSE_RX 0x2UL
......@@ -1573,6 +1617,9 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4 0x16UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4 0x17UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE 0x18UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET 0x19UL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX 0x1aUL
#define PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX 0x1bUL
u8 media_type;
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_UNKNOWN 0x0UL
#define PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP 0x1UL
......@@ -1651,14 +1698,16 @@ struct hwrm_port_phy_qcfg_output {
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE74_ENABLED 0x10UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_SUPPORTED 0x20UL
#define PORT_PHY_QCFG_RESP_FEC_CFG_FEC_CLAUSE91_ENABLED 0x40UL
u8 duplex_state;
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF 0x0UL
#define PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL 0x1UL
u8 unused_1;
u8 unused_2;
char phy_vendor_name[16];
char phy_vendor_partnumber[16];
__le32 unused_3;
__le32 unused_2;
u8 unused_3;
u8 unused_4;
u8 unused_5;
u8 unused_6;
u8 valid;
};
......@@ -1744,6 +1793,51 @@ struct hwrm_port_mac_cfg_output {
u8 valid;
};
/* hwrm_port_mac_ptp_qcfg */
/* Input (24 bytes) */
struct hwrm_port_mac_ptp_qcfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 port_id;
__le16 unused_0[3];
};
/* Output (80 bytes) */
struct hwrm_port_mac_ptp_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 flags;
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS 0x1UL
#define PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS 0x2UL
u8 unused_0;
__le16 unused_1;
__le32 rx_ts_reg_off_lower;
__le32 rx_ts_reg_off_upper;
__le32 rx_ts_reg_off_seq_id;
__le32 rx_ts_reg_off_src_id_0;
__le32 rx_ts_reg_off_src_id_1;
__le32 rx_ts_reg_off_src_id_2;
__le32 rx_ts_reg_off_domain_id;
__le32 rx_ts_reg_off_fifo;
__le32 rx_ts_reg_off_fifo_adv;
__le32 rx_ts_reg_off_granularity;
__le32 tx_ts_reg_off_lower;
__le32 tx_ts_reg_off_upper;
__le32 tx_ts_reg_off_seq_id;
__le32 tx_ts_reg_off_fifo;
__le32 tx_ts_reg_off_granularity;
__le32 unused_2;
u8 unused_3;
u8 unused_4;
u8 unused_5;
u8 valid;
};
/* hwrm_port_qstats */
/* Input (40 bytes) */
struct hwrm_port_qstats_input {
......@@ -1874,10 +1968,10 @@ struct hwrm_port_phy_qcaps_output {
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 eee_supported;
#define PORT_PHY_QCAPS_RESP_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_RSVD1_MASK 0xfeUL
#define PORT_PHY_QCAPS_RESP_RSVD1_SFT 1
u8 flags;
#define PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED 0x1UL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_MASK 0xfeUL
#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1_SFT 1
u8 unused_0;
__le16 supported_speeds_force_mode;
#define PORT_PHY_QCAPS_RESP_SUPPORTED_SPEEDS_FORCE_MODE_100MBHD 0x1UL
......@@ -3152,6 +3246,95 @@ struct hwrm_queue_cos2bw_cfg_output {
u8 valid;
};
/* hwrm_queue_dscp_qcaps */
/* Input (24 bytes) */
struct hwrm_queue_dscp_qcaps_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
u8 port_id;
u8 unused_0[7];
};
/* Output (16 bytes) */
struct hwrm_queue_dscp_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
u8 num_dscp_bits;
u8 unused_0;
__le16 max_entries;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_queue_dscp2pri_qcfg */
/* Input (32 bytes) */
struct hwrm_queue_dscp2pri_qcfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 dest_data_addr;
u8 port_id;
u8 unused_0;
__le16 dest_data_buffer_size;
__le32 unused_1;
};
/* Output (16 bytes) */
struct hwrm_queue_dscp2pri_qcfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 entry_cnt;
u8 default_pri;
u8 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_queue_dscp2pri_cfg */
/* Input (40 bytes) */
struct hwrm_queue_dscp2pri_cfg_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le64 src_data_addr;
__le32 flags;
#define QUEUE_DSCP2PRI_CFG_REQ_FLAGS_USE_HW_DEFAULT_PRI 0x1UL
__le32 enables;
#define QUEUE_DSCP2PRI_CFG_REQ_ENABLES_DEFAULT_PRI 0x1UL
u8 port_id;
u8 default_pri;
__le16 entry_cnt;
__le32 unused_0;
};
/* Output (16 bytes) */
struct hwrm_queue_dscp2pri_cfg_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_vnic_alloc */
/* Input (24 bytes) */
struct hwrm_vnic_alloc_input {
......@@ -4038,7 +4221,7 @@ struct hwrm_cfa_encap_record_alloc_input {
#define CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_IPGRE 0x8UL
u8 unused_0;
__le16 unused_1;
__le32 encap_data[16];
__le32 encap_data[20];
};
/* Output (16 bytes) */
......@@ -4120,8 +4303,8 @@ struct hwrm_cfa_ntuple_filter_alloc_input {
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6 0x6UL
u8 ip_protocol;
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UNKNOWN 0x0UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x11UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_TCP 0x6UL
#define CFA_NTUPLE_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP 0x11UL
__le16 dst_id;
__le16 mirror_vnic_id;
u8 tunnel_type;
......@@ -4224,6 +4407,58 @@ struct hwrm_cfa_ntuple_filter_cfg_output {
u8 valid;
};
/* hwrm_cfa_vfr_alloc */
/* Input (32 bytes) */
struct hwrm_cfa_vfr_alloc_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le16 vf_id;
__le16 reserved;
__le32 unused_0;
char vfr_name[32];
};
/* Output (16 bytes) */
struct hwrm_cfa_vfr_alloc_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 rx_cfa_code;
__le16 tx_cfa_action;
u8 unused_0;
u8 unused_1;
u8 unused_2;
u8 valid;
};
/* hwrm_cfa_vfr_free */
/* Input (24 bytes) */
struct hwrm_cfa_vfr_free_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
char vfr_name[32];
};
/* Output (16 bytes) */
struct hwrm_cfa_vfr_free_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le32 unused_0;
u8 unused_1;
u8 unused_2;
u8 unused_3;
u8 valid;
};
/* hwrm_tunnel_dst_port_query */
/* Input (24 bytes) */
struct hwrm_tunnel_dst_port_query_input {
......@@ -4448,12 +4683,13 @@ struct hwrm_fw_reset_input {
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
#define FW_RESET_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
u8 selfrst_status;
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE 0x0UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP 0x1UL
#define FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
__le16 unused_0[3];
u8 host_idx;
u8 unused_0[5];
};
/* Output (16 bytes) */
......@@ -4487,7 +4723,7 @@ struct hwrm_fw_qstatus_input {
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_MGMT 0x1UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_NETCTRL 0x2UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_ROCE 0x3UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_RSVD 0x4UL
#define FW_QSTATUS_REQ_EMBEDDED_PROC_TYPE_HOST 0x4UL
u8 unused_0[7];
};
......@@ -4572,6 +4808,16 @@ struct hwrm_fw_set_structured_data_output {
u8 valid;
};
/* Command specific Error Codes (8 bytes) */
struct hwrm_fw_set_structured_data_cmd_err {
u8 code;
#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_HDR_CNT 0x1UL
#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_FMT 0x2UL
#define FW_SET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
u8 unused_0[7];
};
/* hwrm_fw_get_structured_data */
/* Input (32 bytes) */
struct hwrm_fw_get_structured_data_input {
......@@ -4611,6 +4857,14 @@ struct hwrm_fw_get_structured_data_output {
u8 valid;
};
/* Command specific Error Codes (8 bytes) */
struct hwrm_fw_get_structured_data_cmd_err {
u8 code;
#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_UNKNOWN 0x0UL
#define FW_GET_STRUCTURED_DATA_CMD_ERR_CODE_BAD_ID 0x3UL
u8 unused_0[7];
};
/* hwrm_exec_fwd_resp */
/* Input (128 bytes) */
struct hwrm_exec_fwd_resp_input {
......@@ -5411,7 +5665,7 @@ struct cmd_nums {
#define HWRM_PORT_LPBK_CLR_STATS (0x26UL)
#define HWRM_PORT_PHY_QCFG (0x27UL)
#define HWRM_PORT_MAC_QCFG (0x28UL)
#define RESERVED7 (0x29UL)
#define HWRM_PORT_MAC_PTP_QCFG (0x29UL)
#define HWRM_PORT_PHY_QCAPS (0x2aUL)
#define HWRM_PORT_PHY_I2C_WRITE (0x2bUL)
#define HWRM_PORT_PHY_I2C_READ (0x2cUL)
......@@ -5421,14 +5675,17 @@ struct cmd_nums {
#define HWRM_QUEUE_QPORTCFG (0x30UL)
#define HWRM_QUEUE_QCFG (0x31UL)
#define HWRM_QUEUE_CFG (0x32UL)
#define RESERVED2 (0x33UL)
#define RESERVED3 (0x34UL)
#define HWRM_FUNC_VLAN_CFG (0x33UL)
#define HWRM_FUNC_VLAN_QCFG (0x34UL)
#define HWRM_QUEUE_PFCENABLE_QCFG (0x35UL)
#define HWRM_QUEUE_PFCENABLE_CFG (0x36UL)
#define HWRM_QUEUE_PRI2COS_QCFG (0x37UL)
#define HWRM_QUEUE_PRI2COS_CFG (0x38UL)
#define HWRM_QUEUE_COS2BW_QCFG (0x39UL)
#define HWRM_QUEUE_COS2BW_CFG (0x3aUL)
#define HWRM_QUEUE_DSCP_QCAPS (0x3bUL)
#define HWRM_QUEUE_DSCP2PRI_QCFG (0x3cUL)
#define HWRM_QUEUE_DSCP2PRI_CFG (0x3dUL)
#define HWRM_VNIC_ALLOC (0x40UL)
#define HWRM_VNIC_FREE (0x41UL)
#define HWRM_VNIC_CFG (0x42UL)
......@@ -5455,7 +5712,7 @@ struct cmd_nums {
#define HWRM_CFA_L2_FILTER_FREE (0x91UL)
#define HWRM_CFA_L2_FILTER_CFG (0x92UL)
#define HWRM_CFA_L2_SET_RX_MASK (0x93UL)
#define RESERVED4 (0x94UL)
#define HWRM_CFA_VLAN_ANTISPOOF_CFG (0x94UL)
#define HWRM_CFA_TUNNEL_FILTER_ALLOC (0x95UL)
#define HWRM_CFA_TUNNEL_FILTER_FREE (0x96UL)
#define HWRM_CFA_ENCAP_RECORD_ALLOC (0x97UL)
......@@ -5494,6 +5751,8 @@ struct cmd_nums {
#define HWRM_CFA_METER_PROFILE_CFG (0xf7UL)
#define HWRM_CFA_METER_INSTANCE_ALLOC (0xf8UL)
#define HWRM_CFA_METER_INSTANCE_FREE (0xf9UL)
#define HWRM_CFA_VFR_ALLOC (0xfdUL)
#define HWRM_CFA_VFR_FREE (0xfeUL)
#define HWRM_CFA_VF_PAIR_ALLOC (0x100UL)
#define HWRM_CFA_VF_PAIR_FREE (0x101UL)
#define HWRM_CFA_VF_PAIR_INFO (0x102UL)
......@@ -5502,6 +5761,9 @@ struct cmd_nums {
#define HWRM_CFA_FLOW_FLUSH (0x105UL)
#define HWRM_CFA_FLOW_STATS (0x106UL)
#define HWRM_CFA_FLOW_INFO (0x107UL)
#define HWRM_CFA_DECAP_FILTER_ALLOC (0x108UL)
#define HWRM_CFA_DECAP_FILTER_FREE (0x109UL)
#define HWRM_CFA_VLAN_ANTISPOOF_QCFG (0x10aUL)
#define HWRM_SELFTEST_QLIST (0x200UL)
#define HWRM_SELFTEST_EXEC (0x201UL)
#define HWRM_SELFTEST_IRQ (0x202UL)
......@@ -5510,6 +5772,8 @@ struct cmd_nums {
#define HWRM_DBG_WRITE_DIRECT (0xff12UL)
#define HWRM_DBG_WRITE_INDIRECT (0xff13UL)
#define HWRM_DBG_DUMP (0xff14UL)
#define HWRM_DBG_ERASE_NVM (0xff15UL)
#define HWRM_DBG_CFG (0xff16UL)
#define HWRM_NVM_FACTORY_DEFAULTS (0xffeeUL)
#define HWRM_NVM_VALIDATE_OPTION (0xffefUL)
#define HWRM_NVM_FLUSH (0xfff0UL)
......
......@@ -18,6 +18,7 @@
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_vfr.h"
#include "bnxt_ethtool.h"
#ifdef CONFIG_BNXT_SRIOV
......@@ -587,6 +588,10 @@ void bnxt_sriov_disable(struct bnxt *bp)
if (!num_vfs)
return;
/* synchronize VF and VF-rep create and destroy */
mutex_lock(&bp->sriov_lock);
bnxt_vf_reps_destroy(bp);
if (pci_vfs_assigned(bp->pdev)) {
bnxt_hwrm_fwd_async_event_cmpl(
bp, NULL, ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD);
......@@ -597,6 +602,7 @@ void bnxt_sriov_disable(struct bnxt *bp)
/* Free the HW resources reserved for various VF's */
bnxt_hwrm_func_vf_resource_free(bp, num_vfs);
}
mutex_unlock(&bp->sriov_lock);
bnxt_free_vf_resources(bp);
......@@ -794,8 +800,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
PORT_PHY_QCFG_RESP_LINK_LINK;
phy_qcfg_resp.link_speed = cpu_to_le16(
PORT_PHY_QCFG_RESP_LINK_SPEED_10GB);
phy_qcfg_resp.duplex =
PORT_PHY_QCFG_RESP_DUPLEX_FULL;
phy_qcfg_resp.duplex_cfg =
PORT_PHY_QCFG_RESP_DUPLEX_CFG_FULL;
phy_qcfg_resp.duplex_state =
PORT_PHY_QCFG_RESP_DUPLEX_STATE_FULL;
phy_qcfg_resp.pause =
(PORT_PHY_QCFG_RESP_PAUSE_TX |
PORT_PHY_QCFG_RESP_PAUSE_RX);
......@@ -804,7 +812,8 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf)
/* force link down */
phy_qcfg_resp.link = PORT_PHY_QCFG_RESP_LINK_NO_LINK;
phy_qcfg_resp.link_speed = 0;
phy_qcfg_resp.duplex = PORT_PHY_QCFG_RESP_DUPLEX_HALF;
phy_qcfg_resp.duplex_state =
PORT_PHY_QCFG_RESP_DUPLEX_STATE_HALF;
phy_qcfg_resp.pause = 0;
}
rc = bnxt_hwrm_fwd_resp(bp, vf, &phy_qcfg_resp,
......
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/jhash.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_vfr.h"
#define CFA_HANDLE_INVALID 0xffff
#define VF_IDX_INVALID 0xffff
static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
u16 *tx_cfa_action, u16 *rx_cfa_code)
{
struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_cfa_vfr_alloc_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1);
req.vf_id = cpu_to_le16(vf_idx);
sprintf(req.vfr_name, "vfr%d", vf_idx);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
*tx_cfa_action = le16_to_cpu(resp->tx_cfa_action);
*rx_cfa_code = le16_to_cpu(resp->rx_cfa_code);
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
{
struct hwrm_cfa_vfr_free_input req = { 0 };
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1);
sprintf(req.vfr_name, "vfr%d", vf_idx);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
return rc;
}
static int bnxt_vf_rep_open(struct net_device *dev)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
struct bnxt *bp = vf_rep->bp;
/* Enable link and TX only if the parent PF is open. */
if (netif_running(bp->dev)) {
netif_carrier_on(dev);
netif_tx_start_all_queues(dev);
}
return 0;
}
static int bnxt_vf_rep_close(struct net_device *dev)
{
netif_carrier_off(dev);
netif_tx_disable(dev);
return 0;
}
static netdev_tx_t bnxt_vf_rep_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
int rc, len = skb->len;
skb_dst_drop(skb);
dst_hold((struct dst_entry *)vf_rep->dst);
skb_dst_set(skb, (struct dst_entry *)vf_rep->dst);
skb->dev = vf_rep->dst->u.port_info.lower_dev;
rc = dev_queue_xmit(skb);
if (!rc) {
vf_rep->tx_stats.packets++;
vf_rep->tx_stats.bytes += len;
}
return rc;
}
static void
bnxt_vf_rep_get_stats64(struct net_device *dev,
struct rtnl_link_stats64 *stats)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
stats->rx_packets = vf_rep->rx_stats.packets;
stats->rx_bytes = vf_rep->rx_stats.bytes;
stats->tx_packets = vf_rep->tx_stats.packets;
stats->tx_bytes = vf_rep->tx_stats.bytes;
}
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code)
{
u16 vf_idx;
if (cfa_code && bp->cfa_code_map && BNXT_PF(bp)) {
vf_idx = bp->cfa_code_map[cfa_code];
if (vf_idx != VF_IDX_INVALID)
return bp->vf_reps[vf_idx]->dev;
}
return NULL;
}
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(skb->dev);
struct bnxt_vf_rep_stats *rx_stats;
rx_stats = &vf_rep->rx_stats;
rx_stats->bytes += skb->len;
rx_stats->packets++;
netif_receive_skb(skb);
}
static int bnxt_vf_rep_get_phys_port_name(struct net_device *dev, char *buf,
size_t len)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
int rc;
rc = snprintf(buf, len, "vfr%d", vf_rep->vf_idx);
if (rc >= len)
return -EOPNOTSUPP;
return 0;
}
static void bnxt_vf_rep_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info)
{
strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
}
static int bnxt_vf_rep_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
struct bnxt_vf_rep *vf_rep = netdev_priv(dev);
/* as only PORT_PARENT_ID is currently supported, use common
 * code between PF and VF-rep for now.
 */
return bnxt_port_attr_get(vf_rep->bp, attr);
}
static const struct switchdev_ops bnxt_vf_rep_switchdev_ops = {
.switchdev_port_attr_get = bnxt_vf_rep_port_attr_get
};
static const struct ethtool_ops bnxt_vf_rep_ethtool_ops = {
.get_drvinfo = bnxt_vf_rep_get_drvinfo
};
static const struct net_device_ops bnxt_vf_rep_netdev_ops = {
.ndo_open = bnxt_vf_rep_open,
.ndo_stop = bnxt_vf_rep_close,
.ndo_start_xmit = bnxt_vf_rep_xmit,
.ndo_get_stats64 = bnxt_vf_rep_get_stats64,
.ndo_get_phys_port_name = bnxt_vf_rep_get_phys_port_name
};
/* Called when the parent PF interface is closed:
* As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
* under the rtnl_lock()
*/
void bnxt_vf_reps_close(struct bnxt *bp)
{
struct bnxt_vf_rep *vf_rep;
u16 num_vfs, i;
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return;
num_vfs = pci_num_vf(bp->pdev);
for (i = 0; i < num_vfs; i++) {
vf_rep = bp->vf_reps[i];
if (netif_running(vf_rep->dev))
bnxt_vf_rep_close(vf_rep->dev);
}
}
/* Called when the parent PF interface is opened (re-opened):
* As the mode transition from SWITCHDEV to LEGACY
 * happens under the rtnl_lock(), this routine is safe
* under the rtnl_lock()
*/
void bnxt_vf_reps_open(struct bnxt *bp)
{
int i;
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return;
for (i = 0; i < pci_num_vf(bp->pdev); i++)
bnxt_vf_rep_open(bp->vf_reps[i]->dev);
}
static void __bnxt_vf_reps_destroy(struct bnxt *bp)
{
u16 num_vfs = pci_num_vf(bp->pdev);
struct bnxt_vf_rep *vf_rep;
int i;
for (i = 0; i < num_vfs; i++) {
vf_rep = bp->vf_reps[i];
if (vf_rep) {
dst_release((struct dst_entry *)vf_rep->dst);
if (vf_rep->tx_cfa_action != CFA_HANDLE_INVALID)
hwrm_cfa_vfr_free(bp, vf_rep->vf_idx);
if (vf_rep->dev) {
/* if register_netdev failed, then netdev_ops
* would have been set to NULL
*/
if (vf_rep->dev->netdev_ops)
unregister_netdev(vf_rep->dev);
free_netdev(vf_rep->dev);
}
}
}
kfree(bp->vf_reps);
bp->vf_reps = NULL;
}
void bnxt_vf_reps_destroy(struct bnxt *bp)
{
bool closed = false;
if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
return;
if (!bp->vf_reps)
return;
/* Ensure that parent PF's and VF-reps' RX/TX has been quiesced
* before proceeding with VF-rep cleanup.
*/
rtnl_lock();
if (netif_running(bp->dev)) {
bnxt_close_nic(bp, false, false);
closed = true;
}
/* un-publish cfa_code_map so that RX path can't see it anymore */
kfree(bp->cfa_code_map);
bp->cfa_code_map = NULL;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
if (closed)
bnxt_open_nic(bp, false, false);
rtnl_unlock();
/* Need to call vf_reps_destroy() outside of rtnl_lock
* as unregister_netdev takes rtnl_lock
*/
__bnxt_vf_reps_destroy(bp);
}
/* Use the OUI of the PF's perm addr and report the same mac addr
* for the same VF-rep each time
*/
static void bnxt_vf_rep_eth_addr_gen(u8 *src_mac, u16 vf_idx, u8 *mac)
{
u32 addr;
ether_addr_copy(mac, src_mac);
addr = jhash(src_mac, ETH_ALEN, 0) + vf_idx;
mac[3] = (u8)(addr & 0xFF);
mac[4] = (u8)((addr >> 8) & 0xFF);
mac[5] = (u8)((addr >> 16) & 0xFF);
}
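For example, with a hypothetical PF perm addr of 00:10:18:aa:bb:cc, every VF-rep address keeps the 00:10:18 OUI; only bytes 3-5 are replaced with the low 24 bits of jhash(PF MAC) + vf_idx, so a given VF index deterministically maps to the same address across driver reloads.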
static void bnxt_vf_rep_netdev_init(struct bnxt *bp, struct bnxt_vf_rep *vf_rep,
struct net_device *dev)
{
struct net_device *pf_dev = bp->dev;
dev->netdev_ops = &bnxt_vf_rep_netdev_ops;
dev->ethtool_ops = &bnxt_vf_rep_ethtool_ops;
dev->switchdev_ops = &bnxt_vf_rep_switchdev_ops;
/* Just inherit all the features of the parent PF as the VF-rep
 * uses the RX/TX rings of the parent PF
 */
dev->hw_features = pf_dev->hw_features;
dev->gso_partial_features = pf_dev->gso_partial_features;
dev->vlan_features = pf_dev->vlan_features;
dev->hw_enc_features = pf_dev->hw_enc_features;
dev->features |= pf_dev->features;
bnxt_vf_rep_eth_addr_gen(bp->pf.mac_addr, vf_rep->vf_idx,
dev->perm_addr);
ether_addr_copy(dev->dev_addr, dev->perm_addr);
}
static int bnxt_vf_reps_create(struct bnxt *bp)
{
u16 *cfa_code_map = NULL, num_vfs = pci_num_vf(bp->pdev);
struct bnxt_vf_rep *vf_rep;
struct net_device *dev;
int rc, i;
bp->vf_reps = kcalloc(num_vfs, sizeof(vf_rep), GFP_KERNEL);
if (!bp->vf_reps)
return -ENOMEM;
/* storage for cfa_code to vf-idx mapping */
cfa_code_map = kmalloc(sizeof(*bp->cfa_code_map) * MAX_CFA_CODE,
GFP_KERNEL);
if (!cfa_code_map) {
rc = -ENOMEM;
goto err;
}
for (i = 0; i < MAX_CFA_CODE; i++)
cfa_code_map[i] = VF_IDX_INVALID;
for (i = 0; i < num_vfs; i++) {
dev = alloc_etherdev(sizeof(*vf_rep));
if (!dev) {
rc = -ENOMEM;
goto err;
}
vf_rep = netdev_priv(dev);
bp->vf_reps[i] = vf_rep;
vf_rep->dev = dev;
vf_rep->bp = bp;
vf_rep->vf_idx = i;
vf_rep->tx_cfa_action = CFA_HANDLE_INVALID;
/* get cfa handles from FW */
rc = hwrm_cfa_vfr_alloc(bp, vf_rep->vf_idx,
&vf_rep->tx_cfa_action,
&vf_rep->rx_cfa_code);
if (rc) {
rc = -ENOLINK;
goto err;
}
cfa_code_map[vf_rep->rx_cfa_code] = vf_rep->vf_idx;
vf_rep->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
GFP_KERNEL);
if (!vf_rep->dst) {
rc = -ENOMEM;
goto err;
}
/* only cfa_action is needed to mux a packet while TXing */
vf_rep->dst->u.port_info.port_id = vf_rep->tx_cfa_action;
vf_rep->dst->u.port_info.lower_dev = bp->dev;
bnxt_vf_rep_netdev_init(bp, vf_rep, dev);
rc = register_netdev(dev);
if (rc) {
/* no need for unregister_netdev in cleanup */
dev->netdev_ops = NULL;
goto err;
}
}
/* publish cfa_code_map only after all VF-reps have been initialized */
bp->cfa_code_map = cfa_code_map;
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
netif_keep_dst(bp->dev);
return 0;
err:
netdev_info(bp->dev, "%s error=%d", __func__, rc);
kfree(cfa_code_map);
__bnxt_vf_reps_destroy(bp);
return rc;
}
/* Devlink related routines */
static int bnxt_dl_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
*mode = bp->eswitch_mode;
return 0;
}
static int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct bnxt *bp = bnxt_get_bp_from_dl(devlink);
int rc = 0;
mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
netdev_info(bp->dev, "already in %s eswitch mode",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
rc = -EINVAL;
goto done;
}
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
bnxt_vf_reps_destroy(bp);
break;
case DEVLINK_ESWITCH_MODE_SWITCHDEV:
if (pci_num_vf(bp->pdev) == 0) {
netdev_info(bp->dev,
"Enable VFs before setting swtichdev mode");
rc = -EPERM;
goto done;
}
rc = bnxt_vf_reps_create(bp);
break;
default:
rc = -EINVAL;
goto done;
}
done:
mutex_unlock(&bp->sriov_lock);
return rc;
}
static const struct devlink_ops bnxt_dl_ops = {
.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
.eswitch_mode_get = bnxt_dl_eswitch_mode_get
};
int bnxt_dl_register(struct bnxt *bp)
{
struct devlink *dl;
int rc;
if (!pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV))
return 0;
if (bp->hwrm_spec_code < 0x10800) {
netdev_warn(bp->dev, "Firmware does not support SR-IOV E-Switch SWITCHDEV mode.\n");
return -ENOTSUPP;
}
dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
netdev_warn(bp->dev, "devlink_alloc failed");
return -ENOMEM;
}
bnxt_link_bp_to_dl(dl, bp);
bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
bnxt_link_bp_to_dl(dl, NULL);
devlink_free(dl);
netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
return rc;
}
return 0;
}
void bnxt_dl_unregister(struct bnxt *bp)
{
struct devlink *dl = bp->dl;
if (!dl)
return;
devlink_unregister(dl);
devlink_free(dl);
}
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*/
#ifndef BNXT_VFR_H
#define BNXT_VFR_H
#define MAX_CFA_CODE 65536
/* Struct to hold housekeeping info needed by devlink interface */
struct bnxt_dl {
struct bnxt *bp; /* back ptr to the controlling dev */
};
static inline struct bnxt *bnxt_get_bp_from_dl(struct devlink *dl)
{
return ((struct bnxt_dl *)devlink_priv(dl))->bp;
}
static inline void bnxt_link_bp_to_dl(struct devlink *dl, struct bnxt *bp)
{
struct bnxt_dl *bp_dl = devlink_priv(dl);
bp_dl->bp = bp;
if (bp)
bp->dl = dl;
}
int bnxt_dl_register(struct bnxt *bp);
void bnxt_dl_unregister(struct bnxt *bp);
void bnxt_vf_reps_destroy(struct bnxt *bp);
void bnxt_vf_reps_close(struct bnxt *bp);
void bnxt_vf_reps_open(struct bnxt *bp);
void bnxt_vf_rep_rx(struct bnxt *bp, struct sk_buff *skb);
struct net_device *bnxt_get_vf_rep(struct bnxt *bp, u16 cfa_code);
#endif /* BNXT_VFR_H */