Commit 26a4d063 authored by David S. Miller

Merge branch 'bnxt_en-next'

Michael Chan says:

====================
bnxt_en: Patches for net-next.

Mainly clean-ups, optimizations, and updating to the latest firmware
interface spec.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f4568828 fbc9a523
@@ -11,11 +11,11 @@
 #define BNXT_H

 #define DRV_MODULE_NAME		"bnxt_en"
-#define DRV_MODULE_VERSION	"0.1.24"
+#define DRV_MODULE_VERSION	"1.0.0"

-#define DRV_VER_MAJ	0
-#define DRV_VER_MIN	1
-#define DRV_VER_UPD	24
+#define DRV_VER_MAJ	1
+#define DRV_VER_MIN	0
+#define DRV_VER_UPD	0

 struct tx_bd {
 	__le32 tx_bd_len_flags_type;
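For reference, the three numeric macros track the version string; a quick standalone check that they agree (this is not how the driver builds DRV_MODULE_VERSION, which stays a string literal):

#include <stdio.h>

#define DRV_VER_MAJ 1
#define DRV_VER_MIN 0
#define DRV_VER_UPD 0

int main(void)
{
	char ver[16];

	/* compose "major.minor.update" from the numeric components */
	snprintf(ver, sizeof(ver), "%d.%d.%d", DRV_VER_MAJ, DRV_VER_MIN, DRV_VER_UPD);
	printf("driver version: %s\n", ver);	/* prints 1.0.0 */
	return 0;
}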
@@ -695,6 +695,7 @@ struct bnxt_vf_info {
 	u16	max_cp_rings;
 	u16	max_tx_rings;
 	u16	max_rx_rings;
+	u16	max_hw_ring_grps;
 	u16	max_l2_ctxs;
 	u16	max_irqs;
 	u16	max_vnics;
@@ -722,9 +723,8 @@ struct bnxt_pf_info {
 	u16	max_rsscos_ctxs;
 	u16	max_cp_rings;
 	u16	max_tx_rings; /* HW assigned max tx rings for this PF */
-	u16	max_pf_tx_rings; /* runtime max tx rings owned by PF */
 	u16	max_rx_rings; /* HW assigned max rx rings for this PF */
-	u16	max_pf_rx_rings; /* runtime max rx rings owned by PF */
+	u16	max_hw_ring_grps;
 	u16	max_irqs;
 	u16	max_l2_ctxs;
 	u16	max_vnics;
@@ -1084,6 +1084,7 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
 int _hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int bnxt_hwrm_set_coal(struct bnxt *);
+int bnxt_hwrm_func_qcaps(struct bnxt *);
 int bnxt_hwrm_set_pause(struct bnxt *);
 int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
 int bnxt_open_nic(struct bnxt *, bool, bool);
@@ -266,6 +266,8 @@ static int bnxt_set_channels(struct net_device *dev,
 	bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
 	bp->num_stat_ctxs = bp->cp_nr_rings;

+	/* After changing number of rx channels, update NTUPLE feature. */
+	netdev_update_features(dev);
 	if (netif_running(dev)) {
 		rc = bnxt_open_nic(bp, true, false);
 		if ((!rc) && BNXT_PF(bp)) {
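Context for the two added lines: when the RX channel count changes, features that depend on the number of RX rings (NTUPLE, per the added comment) need to be re-validated, which netdev_update_features() triggers by re-running ndo_fix_features. A minimal, hedged sketch of that ordering in a .set_channels handler; netdev_update_features(), netdev_priv() and netif_running() are the real kernel helpers, everything named example_* is made up:

#include <linux/netdevice.h>
#include <linux/ethtool.h>

struct example_priv {
	unsigned int rx_rings;			/* illustrative ring bookkeeping */
};

int example_reopen(struct net_device *dev);	/* hypothetical reopen helper */

int example_set_channels(struct net_device *dev, struct ethtool_channels *ch)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->rx_rings = ch->rx_count;		/* record the new RX ring count */

	/* re-run ndo_fix_features so ring-count-dependent features
	 * (e.g. NTUPLE) are re-validated against the new layout
	 */
	netdev_update_features(dev);

	if (netif_running(dev))
		return example_reopen(dev);	/* stand-in for the driver's reopen path */
	return 0;
}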
@@ -818,6 +820,9 @@ static int bnxt_flash_firmware(struct net_device *dev,
 	case BNX_DIR_TYPE_BOOTCODE_2:
 		code_type = CODE_BOOT;
 		break;
+	case BNX_DIR_TYPE_APE_FW:
+		code_type = CODE_MCTP_PASSTHRU;
+		break;
 	default:
 		netdev_err(dev, "Unsupported directory entry type: %u\n",
 			   dir_type);
@@ -64,7 +64,7 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting)
 	 * the spoof check should also include vlan anti-spoofing
 	 */
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.fid = cpu_to_le16(vf->fw_fid);
 	req.flags = cpu_to_le32(func_flags);
 	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 	if (!rc) {
@@ -128,7 +128,7 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac)
 	memcpy(vf->mac_addr, mac, ETH_ALEN);
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.fid = cpu_to_le16(vf->fw_fid);
 	req.flags = cpu_to_le32(vf->func_flags);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR);
 	memcpy(req.dflt_mac_addr, mac, ETH_ALEN);
@@ -159,7 +159,7 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos)
 		return 0;
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.fid = cpu_to_le16(vf->fw_fid);
 	req.flags = cpu_to_le32(vf->func_flags);
 	req.dflt_vlan = cpu_to_le16(vlan_tag);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN);
@@ -198,7 +198,7 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate,
 	if (min_tx_rate == vf->min_tx_rate && max_tx_rate == vf->max_tx_rate)
 		return 0;
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
-	req.vf_id = cpu_to_le16(vf->fw_fid);
+	req.fid = cpu_to_le16(vf->fw_fid);
 	req.flags = cpu_to_le32(vf->func_flags);
 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW);
 	req.max_bw = cpu_to_le32(max_tx_rate);
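All four ndo_set_vf_* handlers above follow the same shape, and the rename from req.vf_id to req.fid appears to track the updated firmware interface spec mentioned in the cover letter: the request addresses the VF by its firmware function ID. A hedged sketch of that shared pattern with a simplified request layout; the struct, the enable bit and everything named example_* are illustrative, not the real hwrm_func_cfg_input definition:

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

/* illustrative request layout only */
struct example_func_cfg_req {
	__le16	fid;			/* firmware function id of the target VF */
	__le32	flags;
	__le32	enables;		/* which fields the firmware should apply */
	u8	dflt_mac_addr[6];
};

#define EXAMPLE_ENABLES_DFLT_MAC_ADDR	0x1	/* hypothetical enable bit */

int example_send(void *bp, void *req, int len);	/* hypothetical HWRM send helper */

int example_set_vf_mac(void *bp, u16 vf_fw_fid, const u8 *mac)
{
	struct example_func_cfg_req req = {0};

	req.fid = cpu_to_le16(vf_fw_fid);	/* target the VF, not the PF */
	req.enables = cpu_to_le32(EXAMPLE_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac, 6);
	return example_send(bp, &req, sizeof(req));
}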
@@ -363,10 +363,11 @@ static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
 }

 /* only call by PF to reserve resources for VF */
-static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
+static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
 {
 	u32 rc = 0, mtu, i;
 	u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics;
+	u16 vf_ring_grps;
 	struct hwrm_func_cfg_input req = {0};
 	struct bnxt_pf_info *pf = &bp->pf;
@@ -378,18 +379,18 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
 	 * be removed once new HWRM provides HW ring groups capability in
 	 * hwrm_func_qcap.
 	 */
-	vf_cp_rings = min_t(u16, bp->pf.max_cp_rings, bp->pf.max_stat_ctxs);
-	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / *num_vfs;
+	vf_cp_rings = min_t(u16, pf->max_cp_rings, pf->max_stat_ctxs);
+	vf_cp_rings = (vf_cp_rings - bp->cp_nr_rings) / num_vfs;
 	/* TODO: restore this logic below once the WA above is removed */
-	/* vf_cp_rings = (bp->pf.max_cp_rings - bp->cp_nr_rings) / *num_vfs; */
-	vf_stat_ctx = (bp->pf.max_stat_ctxs - bp->num_stat_ctxs) / *num_vfs;
+	/* vf_cp_rings = (pf->max_cp_rings - bp->cp_nr_rings) / num_vfs; */
+	vf_stat_ctx = (pf->max_stat_ctxs - bp->num_stat_ctxs) / num_vfs;
 	if (bp->flags & BNXT_FLAG_AGG_RINGS)
-		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings * 2) /
-			      *num_vfs;
+		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings * 2) /
+			      num_vfs;
 	else
-		vf_rx_rings = (bp->pf.max_rx_rings - bp->rx_nr_rings) /
-			      *num_vfs;
-	vf_tx_rings = (bp->pf.max_tx_rings - bp->tx_nr_rings) / *num_vfs;
+		vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
+	vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
+	vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;

 	req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
 				  FUNC_CFG_REQ_ENABLES_MRU |
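The division above simply gives each VF an equal, truncated share of whatever the PF is not using itself, with HW ring groups now included in the split. A standalone arithmetic sketch of that per-VF share, with made-up numbers:

#include <stdio.h>

/* share = (pf_max - pf_in_use) / num_vfs; the remainder stays with the PF */
static unsigned int per_vf_share(unsigned int pf_max, unsigned int pf_in_use,
				 unsigned int num_vfs)
{
	return (pf_max - pf_in_use) / num_vfs;
}

int main(void)
{
	/* e.g. firmware reports 128 TX rings, the PF keeps 8, and 8 VFs are enabled */
	printf("tx rings per VF:       %u\n", per_vf_share(128, 8, 8));		/* 15 */
	/* with aggregation rings, the PF consumes two RX rings per channel */
	printf("rx rings per VF (agg): %u\n", per_vf_share(128, 8 * 2, 8));	/* 14 */
	return 0;
}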
@@ -399,7 +400,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
 				  FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS |
 				  FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS |
 				  FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS |
-				  FUNC_CFG_REQ_ENABLES_NUM_VNICS);
+				  FUNC_CFG_REQ_ENABLES_NUM_VNICS |
+				  FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS);

 	mtu = bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
 	req.mru = cpu_to_le16(mtu);
@@ -409,6 +411,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
 	req.num_cmpl_rings = cpu_to_le16(vf_cp_rings);
 	req.num_tx_rings = cpu_to_le16(vf_tx_rings);
 	req.num_rx_rings = cpu_to_le16(vf_rx_rings);
+	req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
 	req.num_l2_ctxs = cpu_to_le16(4);
 	vf_vnics = 1;
@@ -417,22 +420,24 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int *num_vfs)
 	req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx);

 	mutex_lock(&bp->hwrm_cmd_lock);
-	for (i = 0; i < *num_vfs; i++) {
-		req.vf_id = cpu_to_le16(pf->first_vf_id + i);
+	for (i = 0; i < num_vfs; i++) {
+		req.fid = cpu_to_le16(pf->first_vf_id + i);
 		rc = _hwrm_send_message(bp, &req, sizeof(req),
 					HWRM_CMD_TIMEOUT);
 		if (rc)
 			break;
-		bp->pf.active_vfs = i + 1;
-		bp->pf.vf[i].fw_fid = le16_to_cpu(req.vf_id);
+		pf->active_vfs = i + 1;
+		pf->vf[i].fw_fid = le16_to_cpu(req.fid);
 	}
 	mutex_unlock(&bp->hwrm_cmd_lock);
 	if (!rc) {
-		bp->pf.max_pf_tx_rings = bp->tx_nr_rings;
-		if (bp->flags & BNXT_FLAG_AGG_RINGS)
-			bp->pf.max_pf_rx_rings = bp->rx_nr_rings * 2;
-		else
-			bp->pf.max_pf_rx_rings = bp->rx_nr_rings;
+		pf->max_tx_rings -= vf_tx_rings * num_vfs;
+		pf->max_rx_rings -= vf_rx_rings * num_vfs;
+		pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
+		pf->max_cp_rings -= vf_cp_rings * num_vfs;
+		pf->max_rsscos_ctxs -= num_vfs;
+		pf->max_stat_ctxs -= vf_stat_ctx * num_vfs;
+		pf->max_vnics -= vf_vnics * num_vfs;
 	}
 	return rc;
 }
@@ -492,7 +497,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
 		goto err_out1;

 	/* Reserve resources for VFs */
-	rc = bnxt_hwrm_func_cfg(bp, num_vfs);
+	rc = bnxt_hwrm_func_cfg(bp, *num_vfs);
 	if (rc)
 		goto err_out2;
@@ -536,8 +541,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
 	bnxt_free_vf_resources(bp);

 	bp->pf.active_vfs = 0;
-	bp->pf.max_pf_rx_rings = bp->pf.max_rx_rings;
-	bp->pf.max_pf_tx_rings = bp->pf.max_tx_rings;
+	/* Reclaim all resources for the PF. */
+	bnxt_hwrm_func_qcaps(bp);
 }

 int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
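Taken together with the earlier hunk, the accounting model changes to: on enable, subtract what was handed to the VFs from the PF's cached maxima; on disable, reclaim everything by re-reading the firmware capabilities (bnxt_hwrm_func_qcaps()) instead of restoring the separately cached max_pf_* values that the patch removes. A hedged sketch of that pattern; the struct and all example_* names are illustrative:

struct example_limits {
	unsigned int max_tx_rings;
	unsigned int max_rx_rings;
	unsigned int max_cp_rings;
};

static void example_query_caps(struct example_limits *pf)
{
	/* hypothetical: refresh *pf from a firmware capability query
	 * (the driver uses bnxt_hwrm_func_qcaps() for this)
	 */
	(void)pf;
}

static void example_enable_vfs(struct example_limits *pf, unsigned int num_vfs,
			       unsigned int vf_tx, unsigned int vf_rx,
			       unsigned int vf_cp)
{
	/* whatever the VFs were given is no longer available to the PF */
	pf->max_tx_rings -= vf_tx * num_vfs;
	pf->max_rx_rings -= vf_rx * num_vfs;
	pf->max_cp_rings -= vf_cp * num_vfs;
}

static void example_disable_vfs(struct example_limits *pf)
{
	/* reclaim all resources for the PF by re-reading the firmware limits */
	example_query_caps(pf);
}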
@@ -595,6 +600,7 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 	/* Set the new target id */
 	req.target_id = cpu_to_le16(vf->fw_fid);
+	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 	req.encap_resp_len = cpu_to_le16(msg_size);
 	req.encap_resp_addr = encap_resp_addr;
 	req.encap_resp_cmpl_ring = encap_resp_cpr;
@@ -629,6 +635,7 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1);
 	/* Set the new target id */
 	req.target_id = cpu_to_le16(vf->fw_fid);
+	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

 	mutex_lock(&bp->hwrm_cmd_lock);
@@ -660,6 +667,7 @@ static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf,
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1);
 	/* Set the new target id */
 	req.target_id = cpu_to_le16(vf->fw_fid);
+	req.encap_resp_target_id = cpu_to_le16(vf->fw_fid);
 	memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size);

 	mutex_lock(&bp->hwrm_cmd_lock);
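The identical one-line addition in all three forwarding paths means both the command's target and the encapsulated response's target now carry the VF's firmware function ID, presumably so the firmware delivers the encapsulated response back to the requesting VF. A hedged sketch of that pairing; the struct is illustrative, not the real hwrm_fwd_resp_input layout:

#include <linux/types.h>
#include <asm/byteorder.h>

/* illustrative layout only */
struct example_fwd_resp_req {
	__le16	target_id;
	__le16	encap_resp_target_id;
};

void example_set_fwd_targets(struct example_fwd_resp_req *req, u16 vf_fw_fid)
{
	req->target_id = cpu_to_le16(vf_fw_fid);		/* who the command is addressed to */
	req->encap_resp_target_id = cpu_to_le16(vf_fw_fid);	/* who receives the encapsulated response */
}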