Commit 48b874cc authored by David S. Miller

Merge branch 'bnxt_en-combined-rx-tx-channels'

Michael Chan says:

====================
bnxt_en: Support combined and rx/tx channels.

The bnxt hardware uses a completion ring for rx and tx events.  The driver
has to process the completion ring entries sequentially for the events.
The current code only supports an rx/tx ring pair for each completion ring.
This patch series adds support for using a dedicated completion ring for
rx only or tx only as an option configurable using ethtool -L.

The benefits for using dedicated completion rings are:

1. A burst of rx packets can cause delay in processing tx events if the
completion ring is shared.  If tx queue is stopped by BQL, this can cause
delay in re-starting the tx queue.

2. A completion ring is sized according to the rx and tx ring size rounded
up to the nearest power of 2.  When the completion ring is shared, it is
sized by adding the rx and tx ring sizes and then rounded to the next power
of 2, often with a lot of wasted space.

3. Using dedicated completion rings, we can adjust the coalescing
parameters independently for rx and tx.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c07f30ad 068c9ec6
This diff is collapsed.
...@@ -528,6 +528,7 @@ struct tx_push_bd { ...@@ -528,6 +528,7 @@ struct tx_push_bd {
}; };
struct bnxt_tx_ring_info { struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod; u16 tx_prod;
u16 tx_cons; u16 tx_cons;
void __iomem *tx_doorbell; void __iomem *tx_doorbell;
...@@ -558,6 +559,7 @@ struct bnxt_tpa_info { ...@@ -558,6 +559,7 @@ struct bnxt_tpa_info {
}; };
struct bnxt_rx_ring_info { struct bnxt_rx_ring_info {
struct bnxt_napi *bnapi;
u16 rx_prod; u16 rx_prod;
u16 rx_agg_prod; u16 rx_agg_prod;
u16 rx_sw_agg_prod; u16 rx_sw_agg_prod;
...@@ -604,8 +606,8 @@ struct bnxt_napi { ...@@ -604,8 +606,8 @@ struct bnxt_napi {
int index; int index;
struct bnxt_cp_ring_info cp_ring; struct bnxt_cp_ring_info cp_ring;
struct bnxt_rx_ring_info rx_ring; struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info tx_ring; struct bnxt_tx_ring_info *tx_ring;
#ifdef CONFIG_NET_RX_BUSY_POLL #ifdef CONFIG_NET_RX_BUSY_POLL
atomic_t poll_state; atomic_t poll_state;
...@@ -875,6 +877,8 @@ struct bnxt { ...@@ -875,6 +877,8 @@ struct bnxt {
#define BNXT_FLAG_USING_MSIX 0x40 #define BNXT_FLAG_USING_MSIX 0x40
#define BNXT_FLAG_MSIX_CAP 0x80 #define BNXT_FLAG_MSIX_CAP 0x80
#define BNXT_FLAG_RFS 0x100 #define BNXT_FLAG_RFS 0x100
#define BNXT_FLAG_SHARED_RINGS 0x200
#define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \ #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA | \
BNXT_FLAG_RFS | \ BNXT_FLAG_RFS | \
BNXT_FLAG_STRIP_VLAN) BNXT_FLAG_STRIP_VLAN)
...@@ -884,6 +888,9 @@ struct bnxt { ...@@ -884,6 +888,9 @@ struct bnxt {
struct bnxt_napi **bnapi; struct bnxt_napi **bnapi;
struct bnxt_rx_ring_info *rx_ring;
struct bnxt_tx_ring_info *tx_ring;
u32 rx_buf_size; u32 rx_buf_size;
u32 rx_buf_use_size; /* useable size */ u32 rx_buf_use_size; /* useable size */
u32 rx_ring_size; u32 rx_ring_size;
...@@ -913,6 +920,8 @@ struct bnxt { ...@@ -913,6 +920,8 @@ struct bnxt {
int cp_nr_rings; int cp_nr_rings;
int num_stat_ctxs; int num_stat_ctxs;
/* grp_info indexed by completion ring index */
struct bnxt_ring_grp_info *grp_info; struct bnxt_ring_grp_info *grp_info;
struct bnxt_vnic_info *vnic_info; struct bnxt_vnic_info *vnic_info;
int nr_vnics; int nr_vnics;
...@@ -1089,5 +1098,5 @@ int bnxt_hwrm_set_pause(struct bnxt *); ...@@ -1089,5 +1098,5 @@ int bnxt_hwrm_set_pause(struct bnxt *);
int bnxt_hwrm_set_link_setting(struct bnxt *, bool); int bnxt_hwrm_set_link_setting(struct bnxt *, bool);
int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_open_nic(struct bnxt *, bool, bool);
int bnxt_close_nic(struct bnxt *, bool, bool); int bnxt_close_nic(struct bnxt *, bool, bool);
void bnxt_get_max_rings(struct bnxt *, int *, int *); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool);
#endif #endif
...@@ -211,7 +211,10 @@ static void bnxt_get_channels(struct net_device *dev, ...@@ -211,7 +211,10 @@ static void bnxt_get_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
int max_rx_rings, max_tx_rings, tcs; int max_rx_rings, max_tx_rings, tcs;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
channel->max_combined = max_rx_rings;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false);
tcs = netdev_get_num_tc(dev); tcs = netdev_get_num_tc(dev);
if (tcs > 1) if (tcs > 1)
max_tx_rings /= tcs; max_tx_rings /= tcs;
...@@ -219,9 +222,12 @@ static void bnxt_get_channels(struct net_device *dev, ...@@ -219,9 +222,12 @@ static void bnxt_get_channels(struct net_device *dev,
channel->max_rx = max_rx_rings; channel->max_rx = max_rx_rings;
channel->max_tx = max_tx_rings; channel->max_tx = max_tx_rings;
channel->max_other = 0; channel->max_other = 0;
channel->max_combined = 0; if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
channel->combined_count = bp->rx_nr_rings;
} else {
channel->rx_count = bp->rx_nr_rings; channel->rx_count = bp->rx_nr_rings;
channel->tx_count = bp->tx_nr_rings_per_tc; channel->tx_count = bp->tx_nr_rings_per_tc;
}
} }
static int bnxt_set_channels(struct net_device *dev, static int bnxt_set_channels(struct net_device *dev,
...@@ -230,19 +236,35 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -230,19 +236,35 @@ static int bnxt_set_channels(struct net_device *dev,
struct bnxt *bp = netdev_priv(dev); struct bnxt *bp = netdev_priv(dev);
int max_rx_rings, max_tx_rings, tcs; int max_rx_rings, max_tx_rings, tcs;
u32 rc = 0; u32 rc = 0;
bool sh = false;
if (channel->other_count)
return -EINVAL;
if (channel->other_count || channel->combined_count || if (!channel->combined_count &&
!channel->rx_count || !channel->tx_count) (!channel->rx_count || !channel->tx_count))
return -EINVAL; return -EINVAL;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings); if (channel->combined_count &&
(channel->rx_count || channel->tx_count))
return -EINVAL;
if (channel->combined_count)
sh = true;
bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, sh);
tcs = netdev_get_num_tc(dev); tcs = netdev_get_num_tc(dev);
if (tcs > 1) if (tcs > 1)
max_tx_rings /= tcs; max_tx_rings /= tcs;
if (channel->rx_count > max_rx_rings || if (sh && (channel->combined_count > max_rx_rings ||
channel->tx_count > max_tx_rings) channel->combined_count > max_tx_rings))
return -EINVAL; return -ENOMEM;
if (!sh && (channel->rx_count > max_rx_rings ||
channel->tx_count > max_tx_rings))
return -ENOMEM;
if (netif_running(dev)) { if (netif_running(dev)) {
if (BNXT_PF(bp)) { if (BNXT_PF(bp)) {
...@@ -258,12 +280,23 @@ static int bnxt_set_channels(struct net_device *dev, ...@@ -258,12 +280,23 @@ static int bnxt_set_channels(struct net_device *dev,
} }
} }
if (sh) {
bp->flags |= BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->combined_count;
bp->tx_nr_rings_per_tc = channel->combined_count;
} else {
bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
bp->rx_nr_rings = channel->rx_count; bp->rx_nr_rings = channel->rx_count;
bp->tx_nr_rings_per_tc = channel->tx_count; bp->tx_nr_rings_per_tc = channel->tx_count;
}
bp->tx_nr_rings = bp->tx_nr_rings_per_tc; bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
if (tcs > 1) if (tcs > 1)
bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs; bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs;
bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings);
bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
bp->tx_nr_rings + bp->rx_nr_rings;
bp->num_stat_ctxs = bp->cp_nr_rings; bp->num_stat_ctxs = bp->cp_nr_rings;
/* After changing number of rx channels, update NTUPLE feature. */ /* After changing number of rx channels, update NTUPLE feature. */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment