Commit 5e458531 authored by David S. Miller

Merge branch 'bnxt_en-updates'

Michael Chan says:

====================
bnxt_en: updates for net-next.

This patch series for net-next contains cleanups, new features and minor
fixes.  The driver-specific busy polling code is removed in favor of the
busy polling support in core networking.  Hardware RFS support is enhanced
with added IPv6 flow support and VF support.  A new scheme to allocate TX
rings from the firmware is implemented for newer chips and firmware.  Plus
some miscellaneous cleanups, minor fixes, and a new maintainers entry.
Please review.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ae7cd93e 3f0d80b6
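
The busy-polling removal below leans on the core taking over through the normal NAPI entry points: the driver keeps a standard poll handler and reports completed work via napi_complete_done(), and the core decides whether to re-arm the interrupt or keep busy polling the ring.  A minimal sketch of that model, assuming hypothetical helpers my_poll_work() and my_rearm_irq() (placeholders, not functions from this patch):

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	/* Drain up to 'budget' completions from the ring (placeholder). */
	int work_done = my_poll_work(napi, budget);

	/* napi_complete_done() returns false when the core wants to keep
	 * busy polling; only re-arm the IRQ doorbell when it returns true.
	 */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_rearm_irq(napi);

	return work_done;
}

This is the shape bnxt_poll() takes in the diff below, minus the old bnxt_lock_napi()/bnxt_busy_polling() state machine.
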
...@@ -2605,6 +2605,12 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BNXT_EN 50 GIGABIT ETHERNET DRIVER
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/bnxt/
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
M: Florian Fainelli <f.fainelli@gmail.com>
M: Ray Jui <rjui@broadcom.com>
......
...@@ -39,9 +39,6 @@
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
...@@ -1130,7 +1127,6 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
dev_kfree_skb_any(skb);
return NULL;
}
tcp_gro_complete(skb);
if (nw_off) { /* tunnel */
struct udphdr *uh = NULL;
...@@ -1180,6 +1176,8 @@ static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
if (likely(skb))
tcp_gro_complete(skb);
#endif
return skb;
}
...@@ -1356,11 +1354,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
rc = -ENOMEM;
if (likely(skb)) {
skb_record_rx_queue(skb, bnapi->index);
skb_mark_napi_id(skb, &bnapi->napi);
if (bnxt_busy_polling(bnapi))
netif_receive_skb(skb);
else
napi_gro_receive(&bnapi->napi, skb);
napi_gro_receive(&bnapi->napi, skb);
rc = 1;
}
goto next_rx_no_prod;
...@@ -1460,11 +1454,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_napi *bnapi, u32 *raw_cons,
}
skb_record_rx_queue(skb, bnapi->index);
skb_mark_napi_id(skb, &bnapi->napi);
if (bnxt_busy_polling(bnapi))
netif_receive_skb(skb);
else
napi_gro_receive(&bnapi->napi, skb);
napi_gro_receive(&bnapi->napi, skb);
rc = 1;
next_rx:
...@@ -1782,9 +1772,6 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int work_done = 0;
if (!bnxt_lock_napi(bnapi))
return budget;
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
...@@ -1792,42 +1779,16 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
break;
if (!bnxt_has_work(bp, cpr)) {
napi_complete(napi);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
if (napi_complete_done(napi, work_done))
BNXT_CP_DB_REARM(cpr->cp_doorbell,
cpr->cp_raw_cons);
break;
}
}
mmiowb();
bnxt_unlock_napi(bnapi);
return work_done;
}
#ifdef CONFIG_NET_RX_BUSY_POLL
static int bnxt_busy_poll(struct napi_struct *napi)
{
struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
struct bnxt *bp = bnapi->bp;
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
int rx_work, budget = 4;
if (atomic_read(&bp->intr_sem) != 0)
return LL_FLUSH_FAILED;
if (!bp->link_info.link_up)
return LL_FLUSH_FAILED;
if (!bnxt_lock_poll(bnapi))
return LL_FLUSH_BUSY;
rx_work = bnxt_poll_work(bp, bnapi, budget);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
bnxt_unlock_poll(bnapi);
return rx_work;
}
#endif
static void bnxt_free_tx_skbs(struct bnxt *bp)
{
int i, max_idx;
...@@ -2535,7 +2496,7 @@ void bnxt_set_ring_params(struct bnxt *bp)
agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
bp->flags &= ~BNXT_FLAG_JUMBO;
if (rx_space > PAGE_SIZE) {
if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
u32 jumbo_factor;
bp->flags |= BNXT_FLAG_JUMBO;
...@@ -2669,6 +2630,10 @@ static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
goto out;
}
if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
!(vnic->flags & BNXT_VNIC_RSS_FLAG))
continue;
/* Allocate rss table and hash key */
vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
&vnic->rss_table_dma_addr,
...@@ -2993,6 +2958,45 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
return rc;
}
static void bnxt_disable_int(struct bnxt *bp)
{
int i;
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
}
}
static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
atomic_inc(&bp->intr_sem);
bnxt_disable_int(bp);
for (i = 0; i < bp->cp_nr_rings; i++)
synchronize_irq(bp->irq_tbl[i].vector);
}
static void bnxt_enable_int(struct bnxt *bp)
{
int i;
atomic_set(&bp->intr_sem, 0);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
}
void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
u16 cmpl_ring, u16 target_id)
{
...@@ -3312,10 +3316,26 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
req.ip_protocol = keys->basic.ip_proto;
req.src_ipaddr[0] = keys->addrs.v4addrs.src;
req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
int i;
req.ethertype = htons(ETH_P_IPV6);
req.ip_addr_type =
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
*(struct in6_addr *)&req.src_ipaddr[0] =
keys->addrs.v6addrs.src;
*(struct in6_addr *)&req.dst_ipaddr[0] =
keys->addrs.v6addrs.dst;
for (i = 0; i < 4; i++) {
req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
}
} else {
req.src_ipaddr[0] = keys->addrs.v4addrs.src;
req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
}
req.src_port = keys->ports.src;
req.src_port_mask = cpu_to_be16(0xffff);
...@@ -3562,6 +3582,12 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
req.rss_rule =
cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
VNIC_CFG_REQ_ENABLES_MRU);
req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
} else {
req.rss_rule = cpu_to_le16(0xffff);
}
...@@ -3665,6 +3691,27 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
return rc;
}
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_vnic_qcaps_input req = {0};
int rc;
if (bp->hwrm_spec_code < 0x10600)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
mutex_lock(&bp->hwrm_cmd_lock);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc) {
if (resp->flags &
cpu_to_le32(VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
}
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
u16 i;
...@@ -3811,6 +3858,30 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
return rc;
}
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
int rc;
if (BNXT_PF(bp)) {
struct hwrm_func_cfg_input req = {0};
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = cpu_to_le16(idx);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
} else {
struct hwrm_func_vf_cfg_input req = {0};
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
req.enables =
cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
req.async_event_cr = cpu_to_le16(idx);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
return rc;
}
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
int i, rc = 0;
...@@ -3827,6 +3898,12 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
goto err_out;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;
if (!i) {
rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
if (rc)
netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
}
}
for (i = 0; i < bp->tx_nr_rings; i++) {
...@@ -3977,6 +4054,12 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
/* The completion rings are about to be freed. After that the
* IRQ doorbell will not work anymore. So we need to disable
* IRQ here.
*/
bnxt_disable_int_sync(bp);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
...@@ -3992,6 +4075,50 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
}
}
/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
struct hwrm_func_qcfg_input req = {0};
int rc;
if (bp->hwrm_spec_code < 0x10601)
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
req.fid = cpu_to_le16(fid);
rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (!rc)
*tx_rings = le16_to_cpu(resp->alloc_tx_rings);
return rc;
}
int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings)
{
struct hwrm_func_cfg_input req = {0};
int rc;
if (bp->hwrm_spec_code < 0x10601)
return 0;
if (BNXT_VF(bp))
return 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
req.fid = cpu_to_le16(0xffff);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS);
req.num_tx_rings = cpu_to_le16(*tx_rings);
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
return rc;
mutex_lock(&bp->hwrm_cmd_lock);
rc = __bnxt_hwrm_get_tx_rings(bp, 0xffff, tx_rings);
mutex_unlock(&bp->hwrm_cmd_lock);
return rc;
}
static void bnxt_hwrm_set_coal_params(struct bnxt *bp, u32 max_bufs,
u32 buf_tmrs, u16 flags,
struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
...@@ -4463,8 +4590,12 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
{
struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
int rc;
if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
goto skip_rss_ctx;
/* allocate context for vnic */
rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
if (rc) {
...@@ -4484,6 +4615,7 @@ static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
bp->rsscos_nr_ctxs++;
}
skip_rss_ctx:
/* configure default vnic, ring grp */
rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
if (rc) {
...@@ -4518,13 +4650,17 @@ static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
int i, rc = 0;
for (i = 0; i < bp->rx_nr_rings; i++) {
struct bnxt_vnic_info *vnic;
u16 vnic_id = i + 1;
u16 ring_id = i;
if (vnic_id >= bp->nr_vnics)
break;
bp->vnic_info[vnic_id].flags |= BNXT_VNIC_RFS_FLAG;
vnic = &bp->vnic_info[vnic_id];
vnic->flags |= BNXT_VNIC_RFS_FLAG;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
if (rc) {
netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
...@@ -4698,34 +4834,6 @@ static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
return bnxt_init_chip(bp, irq_re_init);
}
static void bnxt_disable_int(struct bnxt *bp)
{
int i;
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
BNXT_CP_DB(cpr->cp_doorbell, cpr->cp_raw_cons);
}
}
static void bnxt_enable_int(struct bnxt *bp)
{
int i;
atomic_set(&bp->intr_sem, 0);
for (i = 0; i < bp->cp_nr_rings; i++) {
struct bnxt_napi *bnapi = bp->bnapi[i];
struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
}
static int bnxt_set_real_num_queues(struct bnxt *bp)
{
int rc;
...@@ -4836,6 +4944,24 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
if (BNXT_VF(bp))
return bp->vf.max_rsscos_ctxs;
#endif
return bp->pf.max_rsscos_ctxs;
}
static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
if (BNXT_VF(bp))
return bp->vf.max_vnics;
#endif
return bp->pf.max_vnics;
}
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
...@@ -5094,10 +5220,8 @@ static void bnxt_disable_napi(struct bnxt *bp)
if (!bp->bnapi)
return;
for (i = 0; i < bp->cp_nr_rings; i++) {
napi_disable(&bp->bnapi[i]->napi);
bnxt_disable_poll(bp->bnapi[i]);
}
for (i = 0; i < bp->cp_nr_rings; i++)
napi_disable(&bp->bnapi[i]->napi);
}
static void bnxt_enable_napi(struct bnxt *bp)
...@@ -5106,7 +5230,6 @@ static void bnxt_enable_napi(struct bnxt *bp)
for (i = 0; i < bp->cp_nr_rings; i++) {
bp->bnapi[i]->in_reset = false;
bnxt_enable_poll(bp->bnapi[i]);
napi_enable(&bp->bnapi[i]->napi);
}
}
...@@ -5389,7 +5512,7 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp,
{
u8 autoneg = bp->link_info.autoneg;
u16 fw_link_speed = bp->link_info.req_link_speed;
u32 advertising = bp->link_info.advertising;
u16 advertising = bp->link_info.advertising;
if (autoneg & BNXT_AUTONEG_SPEED) {
req->auto_mode |=
...@@ -5683,19 +5806,6 @@ static int bnxt_open(struct net_device *dev)
return __bnxt_open_nic(bp, true, true);
}
static void bnxt_disable_int_sync(struct bnxt *bp)
{
int i;
atomic_inc(&bp->intr_sem);
if (!netif_running(bp->dev))
return;
bnxt_disable_int(bp);
for (i = 0; i < bp->cp_nr_rings; i++)
synchronize_irq(bp->irq_tbl[i].vector);
}
int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
{
int rc = 0;
...@@ -5717,13 +5827,12 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
msleep(20);
/* Flush rings before disabling interrupts */
/* Flush rings and disable interrupts */
bnxt_shutdown_nic(bp, irq_re_init);
/* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
bnxt_disable_napi(bp);
bnxt_disable_int_sync(bp);
del_timer_sync(&bp->timer);
bnxt_free_skbs(bp);
...@@ -5980,20 +6089,36 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp)
return rc;
}
/* If the chip and firmware support RFS */
static bool bnxt_rfs_supported(struct bnxt *bp)
{
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
return true;
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
return true;
return false;
}
/* If runtime conditions support RFS */
static bool bnxt_rfs_capable(struct bnxt *bp)
{
#ifdef CONFIG_RFS_ACCEL
struct bnxt_pf_info *pf = &bp->pf;
int vnics, max_vnics, max_rss_ctxs;
int vnics;
if (BNXT_VF(bp) || !(bp->flags & BNXT_FLAG_MSIX_CAP))
return false;
vnics = 1 + bp->rx_nr_rings;
if (vnics > pf->max_rsscos_ctxs || vnics > pf->max_vnics) {
max_vnics = bnxt_get_max_func_vnics(bp);
max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
/* RSS contexts not a limiting factor */
if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
max_rss_ctxs = max_vnics;
if (vnics > max_vnics || vnics > max_rss_ctxs) {
netdev_warn(bp->dev,
"Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
min(pf->max_rsscos_ctxs - 1, pf->max_vnics - 1));
min(max_rss_ctxs - 1, max_vnics - 1));
return false;
}
...@@ -6049,6 +6174,9 @@ static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
if (features & NETIF_F_LRO)
flags |= BNXT_FLAG_LRO;
if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
flags &= ~BNXT_FLAG_TPA;
if (features & NETIF_F_HW_VLAN_CTAG_RX)
flags |= BNXT_FLAG_STRIP_VLAN;
...@@ -6458,10 +6586,16 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
sh = true;
if (tc) {
int max_rx_rings, max_tx_rings, rc;
int max_rx_rings, max_tx_rings, req_tx_rings, rsv_tx_rings, rc;
req_tx_rings = bp->tx_nr_rings_per_tc * tc;
rc = bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
if (rc || bp->tx_nr_rings_per_tc * tc > max_tx_rings)
if (rc || req_tx_rings > max_tx_rings)
return -ENOMEM;
rsv_tx_rings = req_tx_rings;
if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings) ||
rsv_tx_rings < req_tx_rings)
return -ENOMEM;
}
...@@ -6553,12 +6687,18 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
goto err_free;
}
if ((fkeys->basic.n_proto != htons(ETH_P_IP)) ||
if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
((fkeys->basic.ip_proto != IPPROTO_TCP) &&
(fkeys->basic.ip_proto != IPPROTO_UDP))) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
bp->hwrm_spec_code < 0x10601) {
rc = -EPROTONOSUPPORT;
goto err_free;
}
memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
...@@ -6765,9 +6905,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
#endif
.ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
.ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
#ifdef CONFIG_NET_RX_BUSY_POLL
.ndo_busy_poll = bnxt_busy_poll,
#endif
};
static void bnxt_remove_one(struct pci_dev *pdev)
...@@ -6906,8 +7043,17 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
int rc;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
if (rc)
return rc;
if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
/* Not enough rings, try disabling agg rings. */
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
if (rc)
return rc;
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
bp->dev->hw_features &= ~NETIF_F_LRO;
bp->dev->features &= ~NETIF_F_LRO;
bnxt_set_ring_params(bp);
}
if (bp->flags & BNXT_FLAG_ROCE_CAP) {
int max_cp, max_stat, max_irq;
...@@ -6946,6 +7092,11 @@ static int bnxt_set_dflt_rings(struct bnxt *bp)
return rc;
bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
rc = bnxt_hwrm_reserve_tx_rings(bp, &bp->tx_nr_rings_per_tc);
if (rc)
netdev_warn(bp->dev, "Unable to reserve tx rings\n");
bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
...@@ -7097,7 +7248,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
bnxt_set_max_func_irqs(bp, max_irqs);
bnxt_set_dflt_rings(bp);
rc = bnxt_set_dflt_rings(bp);
if (rc) {
netdev_err(bp->dev, "Not enough rings available.\n");
rc = -ENOMEM;
goto init_err;
}
/* Default RSS hash cfg. */
bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
...@@ -7112,7 +7268,8 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
}
if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp)) {
bnxt_hwrm_vnic_qcaps(bp);
if (bnxt_rfs_supported(bp)) {
dev->hw_features |= NETIF_F_NTUPLE;
if (bnxt_rfs_capable(bp)) {
bp->flags |= BNXT_FLAG_RFS;
......
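
The new TX ring allocation scheme above follows a request/reserve/verify pattern: the driver asks the firmware to reserve a TX ring count, then checks how many rings were actually granted before committing.  A condensed sketch of that calling convention, wrapped in a hypothetical helper example_commit_tx_rings() around the real bnxt_hwrm_reserve_tx_rings():

static int example_commit_tx_rings(struct bnxt *bp, int req_tx_rings)
{
	int rsv_tx_rings = req_tx_rings;

	/* Ask firmware to reserve the rings; it may grant fewer. */
	if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings))
		return -ENOMEM;

	/* Back out if the reservation fell short of the request. */
	if (rsv_tx_rings < req_tx_rings)
		return -ENOMEM;

	/* Safe to reconfigure the device for req_tx_rings from here. */
	return 0;
}

bnxt_setup_mq_tc() and bnxt_set_channels() in this series both follow this pattern before touching ring state.
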
...@@ -654,21 +654,9 @@ struct bnxt_napi {
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
#ifdef CONFIG_NET_RX_BUSY_POLL
atomic_t poll_state;
#endif
bool in_reset;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
enum bnxt_poll_state_t {
BNXT_STATE_IDLE = 0,
BNXT_STATE_NAPI,
BNXT_STATE_POLL,
BNXT_STATE_DISABLE,
};
#endif
struct bnxt_irq {
irq_handler_t handler;
unsigned int vector;
...@@ -720,6 +708,7 @@ struct bnxt_vnic_info {
#define BNXT_VNIC_RFS_FLAG 2
#define BNXT_VNIC_MCAST_FLAG 4
#define BNXT_VNIC_UCAST_FLAG 8
#define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10
};
#if defined(CONFIG_BNXT_SRIOV)
...@@ -840,7 +829,7 @@ struct bnxt_link_info {
#define BNXT_LINK_SPEED_40GB PORT_PHY_QCFG_RESP_LINK_SPEED_40GB
#define BNXT_LINK_SPEED_50GB PORT_PHY_QCFG_RESP_LINK_SPEED_50GB
u16 support_speeds;
u16 auto_link_speeds;
u16 auto_link_speeds; /* fw adv setting */
#define BNXT_LINK_SPEED_MSK_100MB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_100MB
#define BNXT_LINK_SPEED_MSK_1GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_1GB
#define BNXT_LINK_SPEED_MSK_2GB PORT_PHY_QCFG_RESP_SUPPORT_SPEEDS_2GB
...@@ -863,7 +852,7 @@ struct bnxt_link_info {
u8 req_duplex;
u8 req_flow_ctrl;
u16 req_link_speed;
u32 advertising;
u16 advertising; /* user adv setting */
bool force_link_chng;
/* a copy of phy_qcfg output used to report link
...@@ -956,10 +945,12 @@ struct bnxt {
#define BNXT_FLAG_PORT_STATS 0x400
#define BNXT_FLAG_UDP_RSS_CAP 0x800
#define BNXT_FLAG_EEE_CAP 0x1000
#define BNXT_FLAG_NEW_RSS_CAP 0x2000
#define BNXT_FLAG_ROCEV1_CAP 0x8000
#define BNXT_FLAG_ROCEV2_CAP 0x10000
#define BNXT_FLAG_ROCE_CAP (BNXT_FLAG_ROCEV1_CAP | \
BNXT_FLAG_ROCEV2_CAP)
#define BNXT_FLAG_NO_AGG_RINGS 0x20000
#define BNXT_FLAG_CHIP_NITRO_A0 0x1000000
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
...@@ -1141,93 +1132,6 @@ struct bnxt {
((offsetof(struct tx_port_stats, counter) + \
sizeof(struct rx_port_stats) + 512) / 8)
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}
/* called from the NAPI poll routine to get ownership of a bnapi */
static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
{
int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
BNXT_STATE_NAPI);
return rc == BNXT_STATE_IDLE;
}
static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
{
atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}
/* called from the busy poll routine to get ownership of a bnapi */
static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
{
int rc = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
BNXT_STATE_POLL);
return rc == BNXT_STATE_IDLE;
}
static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
{
atomic_set(&bnapi->poll_state, BNXT_STATE_IDLE);
}
static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
{
return atomic_read(&bnapi->poll_state) == BNXT_STATE_POLL;
}
static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
{
int old;
while (1) {
old = atomic_cmpxchg(&bnapi->poll_state, BNXT_STATE_IDLE,
BNXT_STATE_DISABLE);
if (old == BNXT_STATE_IDLE)
break;
usleep_range(500, 5000);
}
}
#else
static inline void bnxt_enable_poll(struct bnxt_napi *bnapi)
{
}
static inline bool bnxt_lock_napi(struct bnxt_napi *bnapi)
{
return true;
}
static inline void bnxt_unlock_napi(struct bnxt_napi *bnapi)
{
}
static inline bool bnxt_lock_poll(struct bnxt_napi *bnapi)
{
return false;
}
static inline void bnxt_unlock_poll(struct bnxt_napi *bnapi)
{
}
static inline bool bnxt_busy_polling(struct bnxt_napi *bnapi)
{
return false;
}
static inline void bnxt_disable_poll(struct bnxt_napi *bnapi)
{
}
#endif
#define I2C_DEV_ADDR_A0 0xa0
#define I2C_DEV_ADDR_A2 0xa2
#define SFP_EEPROM_SFF_8472_COMP_ADDR 0x5e
...@@ -1246,6 +1150,8 @@ int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
int bmap_size);
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
int bnxt_hwrm_reserve_tx_rings(struct bnxt *bp, int *tx_rings);
int bnxt_hwrm_set_coal(struct bnxt *);
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp);
void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
......
...@@ -388,6 +388,7 @@ static int bnxt_set_channels(struct net_device *dev,
{
struct bnxt *bp = netdev_priv(dev);
int max_rx_rings, max_tx_rings, tcs;
int req_tx_rings, rsv_tx_rings;
u32 rc = 0;
bool sh = false;
...@@ -423,6 +424,20 @@ static int bnxt_set_channels(struct net_device *dev,
channel->tx_count > max_tx_rings))
return -ENOMEM;
req_tx_rings = sh ? channel->combined_count : channel->tx_count;
req_tx_rings = min_t(int, req_tx_rings, max_tx_rings);
if (tcs > 1)
req_tx_rings *= tcs;
rsv_tx_rings = req_tx_rings;
if (bnxt_hwrm_reserve_tx_rings(bp, &rsv_tx_rings))
return -ENOMEM;
if (rsv_tx_rings < req_tx_rings) {
netdev_warn(dev, "Unable to allocate the requested tx rings\n");
return -ENOMEM;
}
if (netif_running(dev)) {
if (BNXT_PF(bp)) {
/* TODO CHIMP_FW: Send message to all VF's
...@@ -524,24 +539,49 @@ static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
fltr_found:
fkeys = &fltr->fkeys;
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V4_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
fs->flow_type = UDP_V4_FLOW;
else
goto fltr_err;
fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V4_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
fs->flow_type = UDP_V4_FLOW;
else
goto fltr_err;
fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
} else {
int i;
if (fkeys->basic.ip_proto == IPPROTO_TCP)
fs->flow_type = TCP_V6_FLOW;
else if (fkeys->basic.ip_proto == IPPROTO_UDP)
fs->flow_type = UDP_V6_FLOW;
else
goto fltr_err;
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
fkeys->addrs.v6addrs.src;
*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
fkeys->addrs.v6addrs.dst;
for (i = 0; i < 4; i++) {
fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
}
fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);
fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
}
fs->ring_cookie = fltr->rxq;
rc = 0;
...@@ -893,7 +933,7 @@ u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
struct ethtool_link_ksettings *lk_ksettings)
{
u16 fw_speeds = link_info->auto_link_speeds;
u16 fw_speeds = link_info->advertising;
u8 fw_pause = 0;
if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
...@@ -1090,8 +1130,9 @@ static int bnxt_set_link_ksettings(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev);
struct bnxt_link_info *link_info = &bp->link_info;
const struct ethtool_link_settings *base = &lk_ksettings->base;
u32 speed, fw_advertising = 0;
bool set_pause = false;
u16 fw_advertising = 0;
u32 speed;
int rc = 0;
if (!BNXT_SINGLE_PF(bp))
......
...@@ -2797,6 +2797,40 @@ struct hwrm_vnic_cfg_output {
u8 valid;
};
/* hwrm_vnic_qcaps */
/* Input (24 bytes) */
struct hwrm_vnic_qcaps_input {
__le16 req_type;
__le16 cmpl_ring;
__le16 seq_id;
__le16 target_id;
__le64 resp_addr;
__le32 enables;
__le32 unused_0;
};
/* Output (24 bytes) */
struct hwrm_vnic_qcaps_output {
__le16 error_code;
__le16 req_type;
__le16 seq_id;
__le16 resp_len;
__le16 mru;
u8 unused_0;
u8 unused_1;
__le32 flags;
#define VNIC_QCAPS_RESP_FLAGS_VLAN_STRIP_CAP 0x2UL
#define VNIC_QCAPS_RESP_FLAGS_BD_STALL_CAP 0x4UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_DUAL_VNIC_CAP 0x8UL
#define VNIC_QCAPS_RESP_FLAGS_ROCE_ONLY_VNIC_CAP 0x10UL
#define VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP 0x20UL
__le32 unused_2;
u8 unused_3;
u8 unused_4;
u8 unused_5;
u8 valid;
};
/* hwrm_vnic_tpa_cfg */
/* Input (40 bytes) */
struct hwrm_vnic_tpa_cfg_input {
......
...@@ -416,6 +416,7 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
u16 vf_ring_grps;
struct hwrm_func_cfg_input req = {0};
struct bnxt_pf_info *pf = &bp->pf;
int total_vf_tx_rings = 0;
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
...@@ -429,6 +430,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
vf_rx_rings = (pf->max_rx_rings - bp->rx_nr_rings) / num_vfs;
vf_ring_grps = (bp->pf.max_hw_ring_grps - bp->rx_nr_rings) / num_vfs;
vf_tx_rings = (pf->max_tx_rings - bp->tx_nr_rings) / num_vfs;
vf_vnics = (pf->max_vnics - bp->nr_vnics) / num_vfs;
vf_vnics = min_t(u16, vf_vnics, vf_rx_rings);
req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MTU |
FUNC_CFG_REQ_ENABLES_MRU |
...@@ -451,7 +454,6 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
req.num_rx_rings = cpu_to_le16(vf_rx_rings);
req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps);
req.num_l2_ctxs = cpu_to_le16(4);
vf_vnics = 1;
req.num_vnics = cpu_to_le16(vf_vnics);
/* FIXME spec currently uses 1 bit for stats ctx */
...@@ -459,6 +461,8 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
mutex_lock(&bp->hwrm_cmd_lock);
for (i = 0; i < num_vfs; i++) {
int vf_tx_rsvd = vf_tx_rings;
req.fid = cpu_to_le16(pf->first_vf_id + i);
rc = _hwrm_send_message(bp, &req, sizeof(req),
HWRM_CMD_TIMEOUT);
...@@ -466,10 +470,15 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs)
break;
pf->active_vfs = i + 1;
pf->vf[i].fw_fid = le16_to_cpu(req.fid);
rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid,
&vf_tx_rsvd);
if (rc)
break;
total_vf_tx_rings += vf_tx_rsvd;
}
mutex_unlock(&bp->hwrm_cmd_lock);
if (!rc) {
pf->max_tx_rings -= vf_tx_rings * num_vfs;
pf->max_tx_rings -= total_vf_tx_rings;
pf->max_rx_rings -= vf_rx_rings * num_vfs;
pf->max_hw_ring_grps -= vf_ring_grps * num_vfs;
pf->max_cp_rings -= vf_cp_rings * num_vfs;
...@@ -506,6 +515,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
min_rx_rings)
rx_ok = 1;
}
if (bp->pf.max_vnics - bp->nr_vnics < min_rx_rings)
rx_ok = 0;
if (bp->pf.max_tx_rings - bp->tx_nr_rings >= min_tx_rings)
tx_ok = 1;
......
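
The SR-IOV hunk above pairs with the TX ring reservation scheme: after configuring each VF, the PF queries the TX ring count the firmware actually reserved for that VF instead of assuming the requested split.  A condensed sketch of the bookkeeping loop, following the diff (the caller holds bp->hwrm_cmd_lock, as __bnxt_hwrm_get_tx_rings() requires):

int total_vf_tx_rings = 0;

for (i = 0; i < num_vfs; i++) {
	int vf_tx_rsvd = vf_tx_rings;	/* fall-back if the query fails */

	/* ... per-VF HWRM_FUNC_CFG as in the diff ... */
	rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, &vf_tx_rsvd);
	if (rc)
		break;
	total_vf_tx_rings += vf_tx_rsvd;
}
if (!rc)
	pf->max_tx_rings -= total_vf_tx_rings;	/* true grant, not an estimate */

Subtracting the summed grants keeps the PF's max_tx_rings accurate when firmware reserves fewer rings per VF than requested.
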