Commit fd41f2bf authored by Florian Fainelli, committed by David S. Miller

net: systemport: Remove adaptive TX coalescing

Adaptive TX coalescing is not currently giving us any advantage and
ends up making the CPU spin more frequently until TX completion. Deny
and disable adaptive TX coalescing for now and rely on static
configuration; we can always add it back later.
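
For illustration, a minimal user-space sketch of the timeout arithmetic the
static path uses: the hardware timeout field counts in ~8.192 us ticks (the
125 MHz reference clock divided by 1024), so the driver converts with
DIV_ROUND_UP(usecs * 1000, 8192). The helper names below and the
RING_TIMEOUT_SHIFT value of 16 are assumptions for the sketch, not driver API.

#include <stdio.h>

/* Assumed field layout for this sketch; the driver packs the timeout into
 * the upper bits of TDMA_DESC_RING_INTR_CONTROL.
 */
#define RING_TIMEOUT_SHIFT	16

/* Mirrors DIV_ROUND_UP(usecs * 1000, 8192) from bcm_sysport_set_tx_coalesce():
 * convert a requested timeout in microseconds to ~8.192 us hardware ticks,
 * rounding up so the effective timeout is never shorter than requested.
 */
static unsigned int tx_usecs_to_ticks(unsigned int usecs)
{
	return (usecs * 1000 + 8191) / 8192;
}

/* Inverse conversion, as done by bcm_sysport_get_coalesce():
 * (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000.
 */
static unsigned int tx_ticks_to_usecs(unsigned int ticks)
{
	return ticks * 8192 / 1000;
}

int main(void)
{
	/* Requesting 50 us programs DIV_ROUND_UP(50000, 8192) = 7 ticks,
	 * which reads back as 7 * 8192 / 1000 = 57 us.
	 */
	unsigned int ticks = tx_usecs_to_ticks(50);

	printf("reg field: 0x%x (%u us effective)\n",
	       ticks << RING_TIMEOUT_SHIFT, tx_ticks_to_usecs(ticks));
	return 0;
}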
Reviewed-by: Tal Gilboa <talgi@mellanox.com>
Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9daae9bd
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -15,7 +15,6 @@
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/netdevice.h>
-#include <linux/net_dim.h>
 #include <linux/etherdevice.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
@@ -588,7 +587,8 @@ static void bcm_sysport_set_rx_coalesce(struct bcm_sysport_priv *priv)
 	rdma_writel(priv, reg, RDMA_MBDONE_INTR);
 }
 
-static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring)
+static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring,
+					struct ethtool_coalesce *ec)
 {
 	struct bcm_sysport_priv *priv = ring->priv;
 	u32 reg;
@@ -596,8 +596,8 @@ static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring)
 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(ring->index));
 	reg &= ~(RING_INTR_THRESH_MASK |
 		 RING_TIMEOUT_MASK << RING_TIMEOUT_SHIFT);
-	reg |= ring->dim.coal_pkts;
-	reg |= DIV_ROUND_UP(ring->dim.coal_usecs * 1000, 8192) <<
+	reg |= ec->tx_max_coalesced_frames;
+	reg |= DIV_ROUND_UP(ec->tx_coalesce_usecs * 1000, 8192) <<
 	       RING_TIMEOUT_SHIFT;
 	tdma_writel(priv, reg, TDMA_DESC_RING_INTR_CONTROL(ring->index));
 }
@@ -606,18 +606,12 @@ static int bcm_sysport_get_coalesce(struct net_device *dev,
 				    struct ethtool_coalesce *ec)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	struct bcm_sysport_tx_ring *ring;
-	unsigned int i;
 	u32 reg;
 
 	reg = tdma_readl(priv, TDMA_DESC_RING_INTR_CONTROL(0));
 
 	ec->tx_coalesce_usecs = (reg >> RING_TIMEOUT_SHIFT) * 8192 / 1000;
 	ec->tx_max_coalesced_frames = reg & RING_INTR_THRESH_MASK;
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		ring = &priv->tx_rings[i];
-		ec->use_adaptive_tx_coalesce |= ring->dim.use_dim;
-	}
-
 	reg = rdma_readl(priv, RDMA_MBDONE_INTR);
 
@@ -632,7 +626,6 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
 				    struct ethtool_coalesce *ec)
 {
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
-	struct bcm_sysport_tx_ring *ring;
 	unsigned int i;
 
 	/* Base system clock is 125Mhz, DMA timeout is this reference clock
@@ -646,20 +639,12 @@ static int bcm_sysport_set_coalesce(struct net_device *dev,
 		return -EINVAL;
 
 	if ((ec->tx_coalesce_usecs == 0 && ec->tx_max_coalesced_frames == 0) ||
-	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0))
+	    (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0) ||
+	    ec->use_adaptive_tx_coalesce)
 		return -EINVAL;
 
-	for (i = 0; i < dev->num_tx_queues; i++) {
-		ring = &priv->tx_rings[i];
-		ring->dim.coal_pkts = ec->tx_max_coalesced_frames;
-		ring->dim.coal_usecs = ec->tx_coalesce_usecs;
-		if (!ec->use_adaptive_tx_coalesce && ring->dim.use_dim) {
-			ring->dim.coal_pkts = 1;
-			ring->dim.coal_usecs = 0;
-		}
-		ring->dim.use_dim = ec->use_adaptive_tx_coalesce;
-		bcm_sysport_set_tx_coalesce(ring);
-	}
+	for (i = 0; i < dev->num_tx_queues; i++)
+		bcm_sysport_set_tx_coalesce(&priv->tx_rings[i], ec);
 
 	priv->dim.coal_usecs = ec->rx_coalesce_usecs;
 	priv->dim.coal_pkts = ec->rx_max_coalesced_frames;
@@ -940,8 +925,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
 	ring->packets += pkts_compl;
 	ring->bytes += bytes_compl;
 	u64_stats_update_end(&priv->syncp);
-	ring->dim.packets = pkts_compl;
-	ring->dim.bytes = bytes_compl;
 
 	ring->c_index = c_index;
@@ -987,7 +970,6 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 {
 	struct bcm_sysport_tx_ring *ring =
 		container_of(napi, struct bcm_sysport_tx_ring, napi);
-	struct net_dim_sample dim_sample;
 	unsigned int work_done = 0;
 
 	work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
@@ -1004,12 +986,6 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 		return 0;
 	}
 
-	if (ring->dim.use_dim) {
-		net_dim_sample(ring->dim.event_ctr, ring->dim.packets,
-			       ring->dim.bytes, &dim_sample);
-		net_dim(&ring->dim.dim, dim_sample);
-	}
-
 	return budget;
 }
@@ -1089,23 +1065,6 @@ static void bcm_sysport_dim_work(struct work_struct *work)
 	dim->state = NET_DIM_START_MEASURE;
 }
 
-static void bcm_sysport_dim_tx_work(struct work_struct *work)
-{
-	struct net_dim *dim = container_of(work, struct net_dim, work);
-	struct bcm_sysport_net_dim *ndim =
-			container_of(dim, struct bcm_sysport_net_dim, dim);
-	struct bcm_sysport_tx_ring *ring =
-			container_of(ndim, struct bcm_sysport_tx_ring, dim);
-	struct net_dim_cq_moder cur_profile =
-			net_dim_get_profile(dim->mode, dim->profile_ix);
-
-	ring->dim.coal_usecs = cur_profile.usec;
-	ring->dim.coal_pkts = cur_profile.pkts;
-
-	bcm_sysport_set_tx_coalesce(ring);
-	dim->state = NET_DIM_START_MEASURE;
-}
-
 /* RX and misc interrupt routine */
 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 {
@@ -1152,7 +1111,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 			continue;
 
 		txr = &priv->tx_rings[ring];
-		txr->dim.event_ctr++;
 
 		if (likely(napi_schedule_prep(&txr->napi))) {
 			intrl2_0_mask_set(priv, ring_bit);
@@ -1185,7 +1143,6 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
 			continue;
 
 		txr = &priv->tx_rings[ring];
-		txr->dim.event_ctr++;
 
 		if (likely(napi_schedule_prep(&txr->napi))) {
 			intrl2_1_mask_set(priv, BIT(ring));
@@ -1551,7 +1508,6 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
 	reg |= (1 << index);
 	tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
 
-	bcm_sysport_init_dim(&ring->dim, bcm_sysport_dim_tx_work);
 	napi_enable(&ring->napi);
 
 	netif_dbg(priv, hw, priv->netdev,
@@ -1582,7 +1538,6 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 		return;
 
 	napi_disable(&ring->napi);
-	cancel_work_sync(&ring->dim.dim.work);
 	netif_napi_del(&ring->napi);
 
 	bcm_sysport_tx_clean(priv, ring);
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -723,7 +723,6 @@ struct bcm_sysport_tx_ring {
 	struct bcm_sysport_priv *priv;	/* private context backpointer */
 	unsigned long	packets;	/* packets statistics */
 	unsigned long	bytes;		/* bytes statistics */
-	struct bcm_sysport_net_dim dim;	/* Net DIM context */
 	unsigned int	switch_queue;	/* switch port queue number */
 	unsigned int	switch_port;	/* switch port queue number */
 	bool		inspect;	/* inspect switch port and queue */
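
User-visible effect, as a hedged reading of the diff rather than anything
stated in the commit: a request that enables adaptive TX coalescing, e.g.
"ethtool -C eth0 adaptive-tx on" (eth0 being a placeholder interface name),
should now be rejected with EINVAL by bcm_sysport_set_coalesce(), while
static settings such as "ethtool -C eth0 tx-usecs 50 tx-frames 32" are
programmed into every TX ring directly via bcm_sysport_set_tx_coalesce().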