Commit 275f37ea authored by David S. Miller

Merge branch 'mvneta-next'

Maxime Chevallier says:

====================
net: mvneta: mqprio cleanups and shaping support

This is the second version of the series that adds some improvements to the
existing mqprio implementation in mvneta, and adds support for
egress shaping offload.

The first 3 patches are minor cleanups, such as using the
tc_mqprio_qopt_offload structure to get access to more offloading
options, cleaning up the logic that decides whether or not we should
offload the mqprio settings, and allowing a 1-to-N mapping between TCs
and queues.

The last patch adds traffic shaping offload, using mvneta's per-queue
token buckets, allowing rates to be limited from 10Kbps up to 5Gbps in
10Kbps increments.

This was tested only on an Armada 3720, with traffic up to 2.5Gbps.

Changes since V1 fix the build for 32-bit kernels, using the right
div helpers as suggested by Jakub.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2f7ed29f 2551dc9e
@@ -38,6 +38,7 @@
 #include <net/ipv6.h>
 #include <net/tso.h>
 #include <net/page_pool.h>
+#include <net/pkt_cls.h>
 #include <linux/bpf_trace.h>

 /* Registers */
@@ -247,12 +248,39 @@
 #define MVNETA_TXQ_SENT_DESC_MASK		0x3fff0000
 #define MVNETA_PORT_TX_RESET			0x3cf0
 #define MVNETA_PORT_TX_DMA_RESET		BIT(0)
+#define MVNETA_TXQ_CMD1_REG			0x3e00
+#define MVNETA_TXQ_CMD1_BW_LIM_SEL_V1		BIT(3)
+#define MVNETA_TXQ_CMD1_BW_LIM_EN		BIT(0)
+#define MVNETA_REFILL_NUM_CLK_REG		0x3e08
+#define MVNETA_REFILL_MAX_NUM_CLK		0x0000ffff
 #define MVNETA_TX_MTU				0x3e0c
 #define MVNETA_TX_TOKEN_SIZE			0x3e14
 #define MVNETA_TX_TOKEN_SIZE_MAX		0xffffffff
+#define MVNETA_TXQ_BUCKET_REFILL_REG(q)		(0x3e20 + ((q) << 2))
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_MASK	0x3ff00000
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT	20
+#define MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX	0x0007ffff
 #define MVNETA_TXQ_TOKEN_SIZE_REG(q)		(0x3e40 + ((q) << 2))
 #define MVNETA_TXQ_TOKEN_SIZE_MAX		0x7fffffff
+
+/* The values of the bucket refill base period and refill period are taken from
+ * the reference manual, and add up to a base resolution of 10Kbps. This allows
+ * covering all rate-limit values from 10Kbps up to 5Gbps.
+ */
+/* Base period for the rate limit algorithm */
+#define MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS	100
+/* Number of Base Periods to wait between each bucket refill */
+#define MVNETA_TXQ_BUCKET_REFILL_PERIOD		1000
+/* The base resolution for rate limiting, in bps. Any max_rate value should be
+ * a multiple of this value.
+ */
+#define MVNETA_TXQ_RATE_LIMIT_RESOLUTION (NSEC_PER_SEC / \
+					 (MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS * \
+					  MVNETA_TXQ_BUCKET_REFILL_PERIOD))
+
 #define MVNETA_LPI_CTRL_0			0x2cc0
 #define MVNETA_LPI_CTRL_1			0x2cc4
 #define MVNETA_LPI_REQUEST_ENABLE		BIT(0)
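As a quick sanity check of the arithmetic behind MVNETA_TXQ_RATE_LIMIT_RESOLUTION
(a standalone host-side sketch, not part of the patch): a bucket is refilled every
100 ns * 1000 = 100 us, so a refill value of 1 corresponds to 10^9 / 10^5 = 10,000 bps,
which suggests each refill token is worth one bit. The 19-bit refill-value cap of
0x7ffff then yields roughly 5.24 Gbps, covering the 10Kbps-5Gbps range the cover
letter advertises.

/* Standalone sketch mirroring the macros above; illustration only. */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC                    1000000000ULL
#define BUCKET_REFILL_BASE_PERIOD_NS    100     /* base period */
#define BUCKET_REFILL_PERIOD            1000    /* base periods per refill */
#define BUCKET_REFILL_VALUE_MAX         0x0007ffff

int main(void)
{
        /* One refill every 100 ns * 1000 = 100 us, so a refill value
         * of 1 amounts to 10^9 / 10^5 = 10000 bps (10 Kbps).
         */
        uint64_t resolution = NSEC_PER_SEC /
                              (BUCKET_REFILL_BASE_PERIOD_NS * BUCKET_REFILL_PERIOD);

        printf("resolution = %llu bps\n", (unsigned long long)resolution);
        /* 0x7ffff * 10 Kbps ~= 5.24 Gbps, above the advertised 5 Gbps cap */
        printf("max rate   = %llu bps\n",
               (unsigned long long)(resolution * BUCKET_REFILL_VALUE_MAX));
        return 0;
}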
@@ -492,7 +520,6 @@ struct mvneta_port {
 	u8 mcast_count[256];
 	u16 tx_ring_size;
 	u16 rx_ring_size;
-	u8 prio_tc_map[8];

 	phy_interface_t phy_interface;
 	struct device_node *dn;
@@ -4896,43 +4923,144 @@ static void mvneta_clear_rx_prio_map(struct mvneta_port *pp)
 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, 0);
 }

-static void mvneta_setup_rx_prio_map(struct mvneta_port *pp)
+static void mvneta_map_vlan_prio_to_rxq(struct mvneta_port *pp, u8 pri, u8 rxq)
 {
-	u32 val = 0;
-	int i;
+	u32 val = mvreg_read(pp, MVNETA_VLAN_PRIO_TO_RXQ);

-	for (i = 0; i < rxq_number; i++)
-		val |= MVNETA_VLAN_PRIO_RXQ_MAP(i, pp->prio_tc_map[i]);
+	val &= ~MVNETA_VLAN_PRIO_RXQ_MAP(pri, 0x7);
+	val |= MVNETA_VLAN_PRIO_RXQ_MAP(pri, rxq);

 	mvreg_write(pp, MVNETA_VLAN_PRIO_TO_RXQ, val);
 }
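The new helper does a read-modify-write of one 3-bit field per VLAN priority.
Below is a minimal sketch of that field update, assuming
MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) expands to (rxq) << ((prio) * 3) — an
assumption inferred from the 0x7 mask in the clear step; the real macro is
defined elsewhere in mvneta.c and is not shown in this diff.

/* Sketch of the 3-bit-per-priority field update; macro layout is assumed. */
#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_RXQ_MAP(prio, rxq)    ((uint32_t)(rxq) << ((prio) * 3))

static uint32_t map_vlan_prio_to_rxq(uint32_t val, uint8_t pri, uint8_t rxq)
{
        val &= ~VLAN_PRIO_RXQ_MAP(pri, 0x7);    /* clear the old 3-bit mapping */
        val |= VLAN_PRIO_RXQ_MAP(pri, rxq);     /* install the new rxq */
        return val;
}

int main(void)
{
        uint32_t val = 0;

        val = map_vlan_prio_to_rxq(val, 1, 3);  /* prio 1 -> rxq 3 */
        val = map_vlan_prio_to_rxq(val, 1, 5);  /* remap prio 1 -> rxq 5 */
        printf("reg = 0x%08x\n", val);          /* prints 0x00000028 */
        return 0;
}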
+static int mvneta_enable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+	unsigned long core_clk_rate;
+	u32 refill_cycles;
+	u32 val;
+
+	core_clk_rate = clk_get_rate(pp->clk);
+	if (!core_clk_rate)
+		return -EINVAL;
+
+	refill_cycles = MVNETA_TXQ_BUCKET_REFILL_BASE_PERIOD_NS /
+			(NSEC_PER_SEC / core_clk_rate);
+
+	if (refill_cycles > MVNETA_REFILL_MAX_NUM_CLK)
+		return -EINVAL;
+
+	/* Enable bw limit algorithm version 3 */
+	val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+	val &= ~(MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+
+	/* Set the base refill rate */
+	mvreg_write(pp, MVNETA_REFILL_NUM_CLK_REG, refill_cycles);
+
+	return 0;
+}
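For intuition about the refill_cycles computation: the driver translates the
100 ns base period into a number of core-clock cycles. With a hypothetical
250 MHz core clock (a value chosen purely for illustration, not taken from the
patch), one cycle is 4 ns, so the base period is 25 cycles, well under
MVNETA_REFILL_MAX_NUM_CLK:

/* Sketch of the refill_cycles math with an assumed 250 MHz core clock. */
#include <stdio.h>

#define NSEC_PER_SEC            1000000000UL
#define BASE_PERIOD_NS          100
#define REFILL_MAX_NUM_CLK      0x0000ffff

int main(void)
{
        unsigned long core_clk_rate = 250000000UL;      /* assumed for illustration */
        /* 1e9 / 250e6 = 4 ns per cycle; 100 ns / 4 ns = 25 cycles */
        unsigned long refill_cycles = BASE_PERIOD_NS /
                                      (NSEC_PER_SEC / core_clk_rate);

        printf("refill_cycles = %lu (max %u)\n",
               refill_cycles, REFILL_MAX_NUM_CLK);      /* prints 25 (max 65535) */
        return 0;
}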
+static void mvneta_disable_per_queue_rate_limit(struct mvneta_port *pp)
+{
+	u32 val = mvreg_read(pp, MVNETA_TXQ_CMD1_REG);
+
+	val |= (MVNETA_TXQ_CMD1_BW_LIM_SEL_V1 | MVNETA_TXQ_CMD1_BW_LIM_EN);
+	mvreg_write(pp, MVNETA_TXQ_CMD1_REG, val);
+}
+static int mvneta_setup_queue_rates(struct mvneta_port *pp, int queue,
+				    u64 min_rate, u64 max_rate)
+{
+	u32 refill_val, rem;
+	u32 val = 0;
+
+	/* Convert from Bps to bps */
+	max_rate *= 8;
+
+	if (min_rate)
+		return -EINVAL;
+
+	refill_val = div_u64_rem(max_rate, MVNETA_TXQ_RATE_LIMIT_RESOLUTION,
+				 &rem);
+
+	if (rem || !refill_val ||
+	    refill_val > MVNETA_TXQ_BUCKET_REFILL_VALUE_MAX)
+		return -EINVAL;
+
+	val = refill_val;
+	val |= (MVNETA_TXQ_BUCKET_REFILL_PERIOD <<
+		MVNETA_TXQ_BUCKET_REFILL_PERIOD_SHIFT);
+
+	mvreg_write(pp, MVNETA_TXQ_BUCKET_REFILL_REG(queue), val);
+
+	return 0;
+}
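Working through mvneta_setup_queue_rates for the 2.5 Gbps rate mentioned in the
cover letter (a host-side sketch, not driver code): max_rate arrives in bytes
per second, is converted back to bits per second, and must divide evenly by the
10 Kbps resolution while staying under the 19-bit refill-value cap.

/* Sketch of the refill-value computation for max_rate = 2.5 Gbps. */
#include <stdio.h>
#include <stdint.h>

#define RATE_LIMIT_RESOLUTION           10000ULL        /* 10 Kbps, derived above */
#define BUCKET_REFILL_VALUE_MAX         0x0007ffff

int main(void)
{
        uint64_t max_rate = 2500000000ULL / 8;  /* tc passes bytes per second */
        uint64_t refill_val, rem;

        max_rate *= 8;                          /* back to bits/s, as the driver does */

        refill_val = max_rate / RATE_LIMIT_RESOLUTION;
        rem = max_rate % RATE_LIMIT_RESOLUTION;

        /* 2.5e9 / 1e4 = 250000, no remainder, below the 0x7ffff (524287) cap */
        printf("refill_val = %llu, rem = %llu, ok = %d\n",
               (unsigned long long)refill_val, (unsigned long long)rem,
               rem == 0 && refill_val && refill_val <= BUCKET_REFILL_VALUE_MAX);
        return 0;
}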
 static int mvneta_setup_mqprio(struct net_device *dev,
-			       struct tc_mqprio_qopt *qopt)
+			       struct tc_mqprio_qopt_offload *mqprio)
 {
 	struct mvneta_port *pp = netdev_priv(dev);
+	int rxq, txq, tc, ret;
 	u8 num_tc;
-	int i;

-	qopt->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
-	num_tc = qopt->num_tc;
+	if (mqprio->qopt.hw != TC_MQPRIO_HW_OFFLOAD_TCS)
+		return 0;
+
+	num_tc = mqprio->qopt.num_tc;

 	if (num_tc > rxq_number)
 		return -EINVAL;

+	mvneta_clear_rx_prio_map(pp);
+
 	if (!num_tc) {
-		mvneta_clear_rx_prio_map(pp);
+		mvneta_disable_per_queue_rate_limit(pp);
 		netdev_reset_tc(dev);
 		return 0;
 	}

-	memcpy(pp->prio_tc_map, qopt->prio_tc_map, sizeof(pp->prio_tc_map));
-
-	mvneta_setup_rx_prio_map(pp);
-
-	netdev_set_num_tc(dev, qopt->num_tc);
-	for (i = 0; i < qopt->num_tc; i++)
-		netdev_set_tc_queue(dev, i, qopt->count[i], qopt->offset[i]);
+	netdev_set_num_tc(dev, mqprio->qopt.num_tc);
+
+	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+		netdev_set_tc_queue(dev, tc, mqprio->qopt.count[tc],
+				    mqprio->qopt.offset[tc]);
+
+		for (rxq = mqprio->qopt.offset[tc];
+		     rxq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+		     rxq++) {
+			if (rxq >= rxq_number)
+				return -EINVAL;
+
+			mvneta_map_vlan_prio_to_rxq(pp, tc, rxq);
+		}
+	}
+
+	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE) {
+		mvneta_disable_per_queue_rate_limit(pp);
+		return 0;
+	}
+
+	if (mqprio->qopt.num_tc > txq_number)
+		return -EINVAL;
+
+	ret = mvneta_enable_per_queue_rate_limit(pp);
+	if (ret)
+		return ret;
+
+	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
+		for (txq = mqprio->qopt.offset[tc];
+		     txq < mqprio->qopt.count[tc] + mqprio->qopt.offset[tc];
+		     txq++) {
+			if (txq >= txq_number)
+				return -EINVAL;
+
+			ret = mvneta_setup_queue_rates(pp, txq,
+						       mqprio->min_rate[tc],
+						       mqprio->max_rate[tc]);
+			if (ret)
+				return ret;
+		}
+	}

 	return 0;
 }
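To see how the count/offset walk realizes the 1-to-N mapping between TCs and
queues, here is a host-side sketch mimicking the two loops above, with
hypothetical mqprio values: num_tc = 2, count = {2, 2}, offset = {0, 2} maps
TC 0 onto queues 0-1 and TC 1 onto queues 2-3, and each rx queue in a range
inherits its TC's VLAN priority.

/* Host-side sketch of the tc -> queue iteration; mqprio values are assumed. */
#include <stdio.h>

#define NUM_TC  2

int main(void)
{
        int count[NUM_TC] = { 2, 2 };   /* queues per TC (example values) */
        int offset[NUM_TC] = { 0, 2 };  /* first queue of each TC */
        int tc, q;

        for (tc = 0; tc < NUM_TC; tc++)
                for (q = offset[tc]; q < count[tc] + offset[tc]; q++)
                        printf("tc %d -> queue %d\n", tc, q);
        /* Prints: tc 0 -> queue 0, tc 0 -> queue 1,
         *         tc 1 -> queue 2, tc 1 -> queue 3
         */
        return 0;
}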