Commit 8f718fa6 authored by David S. Miller

Merge branch 'bnxt_en-fixes'

Michael Chan says:

====================
bnxt_en: bug fixes.

Various bug fixes for the VF/PF link change logic, VF resource checking,
potential firmware response corruption on NVRAM and DCB parameters,
and reading the wrong register for PCIe link speed on the VF.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3efc93c2 5b1e1a9c
@@ -214,6 +214,8 @@ static const u16 bnxt_async_events_arr[] = {
ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};
+static struct workqueue_struct *bnxt_pf_wq;
static bool bnxt_vf_pciid(enum board_idx idx)
{
return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF);
@@ -1024,12 +1026,28 @@ static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_napi *bnapi,
return 0;
}
+static void bnxt_queue_sp_work(struct bnxt *bp)
+{
+if (BNXT_PF(bp))
+queue_work(bnxt_pf_wq, &bp->sp_task);
+else
+schedule_work(&bp->sp_task);
+}
+static void bnxt_cancel_sp_work(struct bnxt *bp)
+{
+if (BNXT_PF(bp))
+flush_workqueue(bnxt_pf_wq);
+else
+cancel_work_sync(&bp->sp_task);
+}
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
if (!rxr->bnapi->in_reset) {
rxr->bnapi->in_reset = true;
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
rxr->rx_next_cons = 0xffff;
}
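The two helpers added above route all slow-path work through one choke point: the PF queues onto its own single-threaded workqueue (created later in bnxt_init_one()) so it can be flushed independently on remove, while VFs stay on the system workqueue. A minimal standalone sketch of the same pattern, using hypothetical example_* names rather than the driver's code:

/*
 * Illustrative sketch only, assuming one dedicated workqueue for
 * privileged (PF) work and the system workqueue for everything else.
 */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_pf_wq;

static void __maybe_unused example_queue_sp_work(struct work_struct *work, bool is_pf)
{
	if (is_pf)
		queue_work(example_pf_wq, work);	/* dedicated PF workqueue */
	else
		schedule_work(work);			/* shared system workqueue */
}

static int __init example_init(void)
{
	example_pf_wq = create_singlethread_workqueue("example_pf_wq");
	return example_pf_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	flush_workqueue(example_pf_wq);		/* let queued PF work finish */
	destroy_workqueue(example_pf_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");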
@@ -1717,7 +1735,7 @@ static int bnxt_async_event_process(struct bnxt *bp,
default:
goto async_event_process_exit;
}
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
async_event_process_exit:
bnxt_ulp_async_events(bp, cmpl);
return 0;
@@ -1751,7 +1769,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
break;
case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
@@ -3448,6 +3466,12 @@ int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
}
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
+int timeout)
+{
+return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
+}
int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
{
int rc;
@@ -6327,7 +6351,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
}
if (link_re_init) {
+mutex_lock(&bp->link_lock);
rc = bnxt_update_phy_setting(bp);
+mutex_unlock(&bp->link_lock);
if (rc)
netdev_warn(bp->dev, "failed to update phy settings\n");
}
@@ -6647,7 +6673,7 @@ static void bnxt_set_rx_mode(struct net_device *dev)
vnic->rx_mask = mask;
set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
}
@@ -6920,7 +6946,7 @@ static void bnxt_tx_timeout(struct net_device *dev)
netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -6952,7 +6978,7 @@ static void bnxt_timer(unsigned long data)
if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
bp->stats_coal_ticks) {
set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
@@ -7025,30 +7051,28 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
bnxt_hwrm_port_qstats(bp);
-/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
-* must be the last functions to be called before exiting.
-*/
if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
-int rc = 0;
+int rc;
+mutex_lock(&bp->link_lock);
if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
&bp->sp_event))
bnxt_hwrm_phy_qcaps(bp);
-bnxt_rtnl_lock_sp(bp);
-if (test_bit(BNXT_STATE_OPEN, &bp->state))
-rc = bnxt_update_link(bp, true);
-bnxt_rtnl_unlock_sp(bp);
+rc = bnxt_update_link(bp, true);
+mutex_unlock(&bp->link_lock);
if (rc)
netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
rc);
}
if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
-bnxt_rtnl_lock_sp(bp);
-if (test_bit(BNXT_STATE_OPEN, &bp->state))
-bnxt_get_port_module_status(bp);
-bnxt_rtnl_unlock_sp(bp);
+mutex_lock(&bp->link_lock);
+bnxt_get_port_module_status(bp);
+mutex_unlock(&bp->link_lock);
}
+/* These functions below will clear BNXT_STATE_IN_SP_TASK. They
+* must be the last functions to be called before exiting.
+*/
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
@@ -7433,7 +7457,7 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
spin_unlock_bh(&bp->ntp_fltr_lock);
set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
return new_fltr->sw_id;
@@ -7516,7 +7540,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
if (bp->vxlan_port_cnt == 1) {
bp->vxlan_port = ti->port;
set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
break;
case UDP_TUNNEL_TYPE_GENEVE:
@@ -7533,7 +7557,7 @@ static void bnxt_udp_tunnel_add(struct net_device *dev,
return;
}
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
static void bnxt_udp_tunnel_del(struct net_device *dev,
@@ -7572,7 +7596,7 @@ static void bnxt_udp_tunnel_del(struct net_device *dev,
return;
}
-schedule_work(&bp->sp_task);
+bnxt_queue_sp_work(bp);
}
static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
@@ -7720,7 +7744,7 @@ static void bnxt_remove_one(struct pci_dev *pdev)
pci_disable_pcie_error_reporting(pdev);
unregister_netdev(dev);
bnxt_shutdown_tc(bp);
-cancel_work_sync(&bp->sp_task);
+bnxt_cancel_sp_work(bp);
bp->sp_event = 0;
bnxt_clear_int_mode(bp);
@@ -7748,6 +7772,7 @@ static int bnxt_probe_phy(struct bnxt *bp)
rc);
return rc;
}
+mutex_init(&bp->link_lock);
rc = bnxt_update_link(bp, false);
if (rc) {
@@ -7946,7 +7971,7 @@ static void bnxt_parse_log_pcie_link(struct bnxt *bp)
enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN;
enum pci_bus_speed speed = PCI_SPEED_UNKNOWN;
-if (pcie_get_minimum_link(bp->pdev, &speed, &width) ||
+if (pcie_get_minimum_link(pci_physfn(bp->pdev), &speed, &width) ||
speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
netdev_info(bp->dev, "Failed to determine PCIe Link Info\n");
else
@@ -8138,8 +8163,17 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
else
device_set_wakeup_capable(&pdev->dev, false);
-if (BNXT_PF(bp))
+if (BNXT_PF(bp)) {
+if (!bnxt_pf_wq) {
+bnxt_pf_wq =
+create_singlethread_workqueue("bnxt_pf_wq");
+if (!bnxt_pf_wq) {
+dev_err(&pdev->dev, "Unable to create workqueue.\n");
+goto init_err_pci_clean;
+}
+}
bnxt_init_tc(bp);
+}
rc = register_netdev(dev);
if (rc)
@@ -8375,4 +8409,17 @@ static struct pci_driver bnxt_pci_driver = {
#endif
};
-module_pci_driver(bnxt_pci_driver);
+static int __init bnxt_init(void)
+{
+return pci_register_driver(&bnxt_pci_driver);
+}
+static void __exit bnxt_exit(void)
+{
+pci_unregister_driver(&bnxt_pci_driver);
+if (bnxt_pf_wq)
+destroy_workqueue(bnxt_pf_wq);
+}
+module_init(bnxt_init);
+module_exit(bnxt_exit);
@@ -1290,6 +1290,10 @@ struct bnxt {
unsigned long *ntp_fltr_bmap;
int ntp_fltr_count;
+/* To protect link related settings during link changes and
+* ethtool settings changes.
+*/
+struct mutex link_lock;
struct bnxt_link_info link_info;
struct ethtool_eee eee;
u32 lpi_tmr_lo;
@@ -1358,6 +1362,7 @@ void bnxt_set_ring_params(struct bnxt *);
int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode);
void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16);
int _hwrm_send_message(struct bnxt *, void *, u32, int);
+int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
int hwrm_send_message(struct bnxt *, void *, u32, int);
int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
......
@@ -50,7 +50,9 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1);
req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN);
-rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mutex_lock(&bp->hwrm_cmd_lock);
+rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
u8 *pri2cos = &resp->pri0_cos_queue_id;
int i, j;
@@ -66,6 +68,7 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
@@ -119,9 +122,13 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
int rc, i;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1);
-rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-if (rc)
+mutex_lock(&bp->hwrm_cmd_lock);
+rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+if (rc) {
+mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+}
data = &resp->queue_id0 + offsetof(struct bnxt_cos2bw_cfg, queue_id);
for (i = 0; i < bp->max_tc; i++, data += sizeof(cos2bw) - 4) {
@@ -143,6 +150,7 @@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets)
}
}
}
+mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
@@ -240,12 +248,17 @@ static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc)
int rc;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);
-rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-if (rc)
+mutex_lock(&bp->hwrm_cmd_lock);
+rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+if (rc) {
+mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
+}
pri_mask = le32_to_cpu(resp->flags);
pfc->pfc_en = pri_mask;
+mutex_unlock(&bp->hwrm_cmd_lock);
return 0;
}
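The three DCB query changes above, and the NVRAM lookup change in the ethtool code below, all enforce the same rule: firmware replies land in the device's single shared HWRM response buffer, so a caller must copy the fields it needs out of that buffer while bp->hwrm_cmd_lock is still held. hwrm_send_message() takes and releases the lock internally, which leaves a window for the next command's response to overwrite the buffer; the _hwrm_send_message() variants leave locking to the caller. A sketch of the resulting calling pattern (hypothetical wrapper name; the types and fields are the ones used in the PFC hunk above, declared in bnxt.h/bnxt_hsi.h):

static int example_query_pfc_enable(struct bnxt *bp, u32 *pri_mask)
{
	struct hwrm_queue_pfcenable_qcfg_input req = {0};
	struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*pri_mask = le32_to_cpu(resp->flags);	/* read the reply under the lock */
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}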
......
@@ -1052,6 +1052,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
u32 ethtool_speed;
ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
+mutex_lock(&bp->link_lock);
bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);
ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
@@ -1099,6 +1100,7 @@ static int bnxt_get_link_ksettings(struct net_device *dev,
base->port = PORT_FIBRE;
}
base->phy_address = link_info->phy_addr;
+mutex_unlock(&bp->link_lock);
return 0;
}
@@ -1190,6 +1192,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
if (!BNXT_SINGLE_PF(bp))
return -EOPNOTSUPP;
+mutex_lock(&bp->link_lock);
if (base->autoneg == AUTONEG_ENABLE) {
BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
advertising);
@@ -1234,6 +1237,7 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);
set_setting_exit:
+mutex_unlock(&bp->link_lock);
return rc;
}
@@ -1805,7 +1809,8 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
req.dir_ordinal = cpu_to_le16(ordinal);
req.dir_ext = cpu_to_le16(ext);
req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
-rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+mutex_lock(&bp->hwrm_cmd_lock);
+rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc == 0) {
if (index)
*index = le16_to_cpu(output->dir_idx);
@@ -1814,6 +1819,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
if (data_length)
*data_length = le32_to_cpu(output->dir_data_length);
}
+mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
......
@@ -502,6 +502,7 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
int rc = 0, vfs_supported;
int min_rx_rings, min_tx_rings, min_rss_ctxs;
int tx_ok = 0, rx_ok = 0, rss_ok = 0;
+int avail_cp, avail_stat;
/* Check if we can enable requested num of vf's. At a mininum
* we require 1 RX 1 TX rings for each VF. In this minimum conf
@@ -509,6 +510,10 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
*/
vfs_supported = *num_vfs;
+avail_cp = bp->pf.max_cp_rings - bp->cp_nr_rings;
+avail_stat = bp->pf.max_stat_ctxs - bp->num_stat_ctxs;
+avail_cp = min_t(int, avail_cp, avail_stat);
while (vfs_supported) {
min_rx_rings = vfs_supported;
min_tx_rings = vfs_supported;
@@ -523,10 +528,12 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
-if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
+if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings ||
+avail_cp < min_rx_rings)
rx_ok = 0;
-if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
+if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings &&
+avail_cp >= min_tx_rings)
tx_ok = 1;
if (bp->pf.max_rsscos_ctxs - bp->rsscos_nr_ctxs >= min_rss_ctxs)
......
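To make the bnxt_sriov_enable() change above concrete with hypothetical numbers: if the PF reports max_cp_rings = 128 with cp_nr_rings = 8 already in use, and max_stat_ctxs = 110 with num_stat_ctxs = 8 in use, then avail_cp = min(128 - 8, 110 - 8) = 102. A request for 104 VFs now fails the new avail_cp < min_rx_rings test, so the surrounding loop (its step-down of vfs_supported sits outside the visible hunk) retries with fewer VFs until every VF can be given a completion ring and a stat context in addition to the RX/TX rings, VNICs and RSS contexts that were already checked.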