Commit fbc08857 authored by David S. Miller

Merge branch 'mvneta-rss-xps'

Gregory CLEMENT says:

====================
mvneta: Introduce RSS support and XPS configuration

This series is the first step to add RSS support to mvneta.

It will allow associating an ethernet interface with a given CPU through
RSS by using "ethtool -X ethX weight". Indeed, currently only one
entry in the RSS lookup table is enabled. Even if it is not really
RSS yet, it allows getting back the IRQ affinity feature we lost by
moving to the per-CPU interrupt.
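As a rough illustration of why a single-entry indirection table pins all
traffic to one queue (and therefore to one CPU), here is a minimal
user-space sketch; the names and values are illustrative, not the
driver's:

/* Minimal sketch: RSS queue selection through an indirection table.
 * With a table of size 1 every flow hash resolves to indir[0], so all
 * traffic lands on the same RX queue. Illustrative only.
 */
#include <stdio.h>

static unsigned int pick_rxq(unsigned int flow_hash,
                             const unsigned int *indir,
                             unsigned int table_size)
{
        return indir[flow_hash % table_size];
}

int main(void)
{
        unsigned int indir[1] = { 2 };  /* the single entry points at queue 2 */
        unsigned int hash;

        for (hash = 0; hash < 4; hash++)
                printf("hash %u -> rxq %u\n", hash, pick_rxq(hash, indir, 1));
        return 0;
}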

The main change compared to the second version is that the fourth
patch now sets up XPS instead of using a driver-specific hack.

The first patch makes the default queue a per-port setting instead of
a global variable.

The second patch really associates the RX queues with the CPUs
instead of relying on masking the per-CPU interrupts to do it. All
the RX queues are enabled and are statically associated with the CPUs
using a modulo of the number of present CPUs. At this stage, however,
only one RX queue will receive the stream.
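A stand-alone sketch of that static modulo assignment (plain
user-space C mirroring the rule above, not the driver code; the
queue/CPU counts are example values):

/* Sketch of the static RX-queue-to-CPU assignment described above:
 * queue q is handled by CPU (q % nr_cpus).
 */
#include <stdio.h>

int main(void)
{
        const int nr_queues = 8, nr_cpus = 4;
        int cpu, q;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
                unsigned int rxq_map = 0;

                for (q = 0; q < nr_queues; q++)
                        if ((q % nr_cpus) == cpu)
                                rxq_map |= 1u << q; /* like MVNETA_CPU_RXQ_ACCESS(q) */

                printf("cpu %d -> rxq mask 0x%02x\n", cpu, rxq_map);
        }
        return 0;
}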

The third patch introduces a first level of RSS support through the
ethtool functions. As explained in the introduction, there is only
one entry in the RSS lookup table, which in the end allows
associating an mvneta port with a CPU through the RX queues, because
the mapping is static.
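The effect of writing that single entry can be summarized in a short
sketch: the entry becomes the default RX queue, and the serving CPU
follows from the same modulo rule used when electing a CPU
(user-space illustration with example values, not the driver code):

/* Sketch: the single indirection entry becomes the default RX queue
 * (pp->rxq_def = pp->indir[0] in the patch), and the CPU serving it
 * is derived with the same modulo rule.
 */
#include <stdio.h>

int main(void)
{
        const unsigned int nr_online_cpus = 4;
        unsigned int indir[1] = { 3 };  /* e.g. set via "ethtool -X" */
        unsigned int rxq_def, elected_cpu;

        rxq_def = indir[0];
        elected_cpu = rxq_def % nr_online_cpus;

        printf("rxq_def=%u served by CPU %u\n", rxq_def, elected_cpu);
        return 0;
}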
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6e71b299 50bf8cb6
@@ -110,9 +110,17 @@
 #define MVNETA_CPU_MAP(cpu)                      (0x2540 + ((cpu) << 2))
 #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK           0x000000ff
 #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK           0x0000ff00
+#define MVNETA_CPU_RXQ_ACCESS(rxq)               BIT(rxq)
+#define MVNETA_CPU_TXQ_ACCESS(txq)               BIT(txq + 8)
 #define MVNETA_RXQ_TIME_COAL_REG(q)              (0x2580 + ((q) << 2))
-/* Exception Interrupt Port/Queue Cause register */
+/* Exception Interrupt Port/Queue Cause register
+ *
+ * Their behavior depend of the mapping done using the PCPX2Q
+ * registers. For a given CPU if the bit associated to a queue is not
+ * set, then for the register a read from this CPU will always return
+ * 0 and a write won't do anything
+ */
 #define MVNETA_INTR_NEW_CAUSE                    0x25a0
 #define MVNETA_INTR_NEW_MASK                     0x25a4
@@ -254,6 +262,11 @@
 #define MVNETA_TX_MTU_MAX               0x3ffff

+/* The RSS lookup table actually has 256 entries but we do not use
+ * them yet
+ */
+#define MVNETA_RSS_LU_TABLE_SIZE        1
+
 /* TSO header size */
 #define TSO_HEADER_SIZE                 128
@@ -356,6 +369,7 @@ struct mvneta_port {
        struct mvneta_tx_queue *txqs;
        struct net_device *dev;
        struct notifier_block cpu_notifier;
+       int rxq_def;

        /* Core clock */
        struct clk *clk;
@@ -374,6 +388,8 @@ struct mvneta_port {
        unsigned int use_inband_status:1;

        u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)];
+
+       u32 indir[MVNETA_RSS_LU_TABLE_SIZE];
 };

 /* The mvneta_tx_desc and mvneta_rx_desc structures describe the
@@ -499,6 +515,9 @@ struct mvneta_tx_queue {

        /* DMA address of TSO headers */
        dma_addr_t tso_hdrs_phys;
+
+       /* Affinity mask for CPUs*/
+       cpumask_t affinity_mask;
 };

 struct mvneta_rx_queue {
@@ -819,7 +838,13 @@ static void mvneta_port_up(struct mvneta_port *pp)
        mvreg_write(pp, MVNETA_TXQ_CMD, q_map);

        /* Enable all initialized RXQs. */
-       mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def));
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+               if (rxq->descs != NULL)
+                       q_map |= (1 << queue);
+       }
+       mvreg_write(pp, MVNETA_RXQ_CMD, q_map);
 }

 /* Stop the Ethernet port activity */
@@ -1025,6 +1050,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        int cpu;
        int queue;
        u32 val;
+       int max_cpu = num_present_cpus();

        /* Clear all Cause registers */
        mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
@@ -1040,13 +1066,33 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        /* Enable MBUS Retry bit16 */
        mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20);

-       /* Set CPU queue access map - all CPUs have access to all RX
-        * queues and to all TX queues
+       /* Set CPU queue access map. CPUs are assigned to the RX and
+        * TX queues modulo their number. If there is only one TX
+        * queue then it is assigned to the CPU associated to the
+        * default RX queue.
         */
-       for_each_present_cpu(cpu)
-               mvreg_write(pp, MVNETA_CPU_MAP(cpu),
-                           (MVNETA_CPU_RXQ_ACCESS_ALL_MASK |
-                            MVNETA_CPU_TXQ_ACCESS_ALL_MASK));
+       for_each_present_cpu(cpu) {
+               int rxq_map = 0, txq_map = 0;
+               int rxq, txq;
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       if ((rxq % max_cpu) == cpu)
+                               rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
+               for (txq = 0; txq < txq_number; txq++)
+                       if ((txq % max_cpu) == cpu)
+                               txq_map |= MVNETA_CPU_TXQ_ACCESS(txq);
+
+               /* With only one TX queue we configure a special case
+                * which will allow to get all the irq on a single
+                * CPU
+                */
+               if (txq_number == 1)
+                       txq_map = (cpu == pp->rxq_def) ?
+                               MVNETA_CPU_TXQ_ACCESS(1) : 0;
+
+               mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+       }

        /* Reset RX and TX DMAs */
        mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET);
@@ -1067,7 +1113,7 @@ static void mvneta_defaults_set(struct mvneta_port *pp)
        mvreg_write(pp, MVNETA_ACC_MODE, val);

        /* Update val of portCfg register accordingly with all RxQueue types */
-       val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def);
+       val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
        mvreg_write(pp, MVNETA_PORT_CONFIG, val);

        val = 0;
@@ -2101,19 +2147,19 @@ static void mvneta_set_rx_mode(struct net_device *dev)
        if (dev->flags & IFF_PROMISC) {
                /* Accept all: Multicast + Unicast */
                mvneta_rx_unicast_promisc_set(pp, 1);
-               mvneta_set_ucast_table(pp, rxq_def);
-               mvneta_set_special_mcast_table(pp, rxq_def);
-               mvneta_set_other_mcast_table(pp, rxq_def);
+               mvneta_set_ucast_table(pp, pp->rxq_def);
+               mvneta_set_special_mcast_table(pp, pp->rxq_def);
+               mvneta_set_other_mcast_table(pp, pp->rxq_def);
        } else {
                /* Accept single Unicast */
                mvneta_rx_unicast_promisc_set(pp, 0);
                mvneta_set_ucast_table(pp, -1);
-               mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
+               mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def);

                if (dev->flags & IFF_ALLMULTI) {
                        /* Accept all multicast */
-                       mvneta_set_special_mcast_table(pp, rxq_def);
-                       mvneta_set_other_mcast_table(pp, rxq_def);
+                       mvneta_set_special_mcast_table(pp, pp->rxq_def);
+                       mvneta_set_other_mcast_table(pp, pp->rxq_def);
                } else {
                        /* Accept only initialized multicast */
                        mvneta_set_special_mcast_table(pp, -1);
@@ -2122,7 +2168,7 @@ static void mvneta_set_rx_mode(struct net_device *dev)
                        if (!netdev_mc_empty(dev)) {
                                netdev_for_each_mc_addr(ha, dev) {
                                        mvneta_mcast_addr_set(pp, ha->addr,
-                                                             rxq_def);
+                                                             pp->rxq_def);
                                }
                        }
                }
@@ -2173,6 +2219,7 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 {
        int rx_done = 0;
        u32 cause_rx_tx;
+       int rx_queue;
        struct mvneta_port *pp = netdev_priv(napi->dev);
        struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
@@ -2204,8 +2251,15 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
        /* For the case where the last mvneta_poll did not process all
         * RX packets
         */
+       rx_queue = fls(((cause_rx_tx >> 8) & 0xff));
+
        cause_rx_tx |= port->cause_rx_tx;
-       rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]);
+
+       if (rx_queue) {
+               rx_queue = rx_queue - 1;
+               rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]);
+       }
+
        budget -= rx_done;

        if (budget > 0) {
@@ -2322,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 static int mvneta_txq_init(struct mvneta_port *pp,
                           struct mvneta_tx_queue *txq)
 {
+       int cpu;
+
        txq->size = pp->tx_ring_size;

        /* A queue must always have room for at least one skb.
@@ -2374,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
        }
        mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);

+       /* Setup XPS mapping */
+       if (txq_number > 1)
+               cpu = txq->id % num_present_cpus();
+       else
+               cpu = pp->rxq_def % num_present_cpus();
+       cpumask_set_cpu(cpu, &txq->affinity_mask);
+       netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id);
+
        return 0;
 }
@@ -2418,19 +2482,27 @@ static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 /* Cleanup all Rx queues */
 static void mvneta_cleanup_rxqs(struct mvneta_port *pp)
 {
-       mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]);
+       int queue;
+
+       for (queue = 0; queue < txq_number; queue++)
+               mvneta_rxq_deinit(pp, &pp->rxqs[queue]);
 }

 /* Init all Rx queues */
 static int mvneta_setup_rxqs(struct mvneta_port *pp)
 {
-       int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]);
-       if (err) {
-               netdev_err(pp->dev, "%s: can't create rxq=%d\n",
-                          __func__, rxq_def);
-               mvneta_cleanup_rxqs(pp);
-               return err;
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++) {
+               int err = mvneta_rxq_init(pp, &pp->rxqs[queue]);
+
+               if (err) {
+                       netdev_err(pp->dev, "%s: can't create rxq=%d\n",
+                                  __func__, queue);
+                       mvneta_cleanup_rxqs(pp);
+                       return err;
+               }
        }

        return 0;
@@ -2454,6 +2526,31 @@ static int mvneta_setup_txqs(struct mvneta_port *pp)
        return 0;
 }

+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       /* All the queue are unmasked, but actually only the ones
+        * maped to this CPU will be unmasked
+        */
+       mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+                   MVNETA_RX_INTR_MASK_ALL |
+                   MVNETA_TX_INTR_MASK_ALL |
+                   MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+       struct mvneta_port *pp = arg;
+
+       /* All the queue are masked, but actually only the ones
+        * maped to this CPU will be masked
+        */
+       mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+       mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+       mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
 static void mvneta_start_dev(struct mvneta_port *pp)
 {
        unsigned int cpu;
@@ -2471,11 +2568,10 @@ static void mvneta_start_dev(struct mvneta_port *pp)
                napi_enable(&port->napi);
        }

-       /* Unmask interrupts */
-       mvreg_write(pp, MVNETA_INTR_NEW_MASK,
-                   MVNETA_RX_INTR_MASK(rxq_number) |
-                   MVNETA_TX_INTR_MASK(txq_number) |
-                   MVNETA_MISCINTR_INTR_MASK);
+       /* Unmask interrupts. It has to be done from each CPU */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+                                        pp, true);
        mvreg_write(pp, MVNETA_INTR_MISC_MASK,
                    MVNETA_CAUSE_PHY_STATUS_CHANGE |
                    MVNETA_CAUSE_LINK_CHANGE |
@@ -2634,7 +2730,7 @@ static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
        mvneta_mac_addr_set(pp, dev->dev_addr, -1);

        /* Set new addr in hw */
-       mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+       mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def);

        eth_commit_mac_addr_change(dev, addr);
        return 0;
@@ -2751,22 +2847,45 @@ static void mvneta_percpu_disable(void *arg)

 static void mvneta_percpu_elect(struct mvneta_port *pp)
 {
-       int online_cpu_idx, cpu, i = 0;
+       int online_cpu_idx, max_cpu, cpu, i = 0;

-       online_cpu_idx = rxq_def % num_online_cpus();
+       online_cpu_idx = pp->rxq_def % num_online_cpus();
+       max_cpu = num_present_cpus();

        for_each_online_cpu(cpu) {
+               int rxq_map = 0, txq_map = 0;
+               int rxq;
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       if ((rxq % max_cpu) == cpu)
+                               rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
+
                if (i == online_cpu_idx)
-                       /* Enable per-CPU interrupt on the one CPU we
-                        * just elected
+                       /* Map the default receive queue queue to the
+                        * elected CPU
                         */
-                       smp_call_function_single(cpu, mvneta_percpu_enable,
-                                                pp, true);
+                       rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def);
+
+               /* We update the TX queue map only if we have one
+                * queue. In this case we associate the TX queue to
+                * the CPU bound to the default RX queue
+                */
+               if (txq_number == 1)
+                       txq_map = (i == online_cpu_idx) ?
+                               MVNETA_CPU_TXQ_ACCESS(1) : 0;
                else
-                       /* Disable per-CPU interrupt on all the other CPU */
-                       smp_call_function_single(cpu, mvneta_percpu_disable,
-                                                pp, true);
+                       txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
+                               MVNETA_CPU_TXQ_ACCESS_ALL_MASK;
+
+               mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map);
+
+               /* Update the interrupt mask on each CPU according the
+                * new mapping
+                */
+               smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
+                                        pp, true);
                i++;
        }
 };
@@ -2801,12 +2920,22 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
                mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
                napi_enable(&port->napi);

+               /* Enable per-CPU interrupts on the CPU that is
+                * brought up.
+                */
+               smp_call_function_single(cpu, mvneta_percpu_enable,
+                                        pp, true);
+
                /* Enable per-CPU interrupt on the one CPU we care
                 * about.
                 */
                mvneta_percpu_elect(pp);

-               /* Unmask all ethernet port interrupts */
+               /* Unmask all ethernet port interrupts, as this
+                * notifier is called for each CPU then the CPU to
+                * Queue mapping is applied
+                */
                mvreg_write(pp, MVNETA_INTR_NEW_MASK,
                            MVNETA_RX_INTR_MASK(rxq_number) |
                            MVNETA_TX_INTR_MASK(txq_number) |
                            MVNETA_MISCINTR_INTR_MASK);
static int mvneta_open(struct net_device *dev) static int mvneta_open(struct net_device *dev)
{ {
struct mvneta_port *pp = netdev_priv(dev); struct mvneta_port *pp = netdev_priv(dev);
int ret; int ret, cpu;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -2887,8 +3016,13 @@ static int mvneta_open(struct net_device *dev)
         */
        mvneta_percpu_disable(pp);

-       /* Elect a CPU to handle our RX queue interrupt */
-       mvneta_percpu_elect(pp);
+       /* Enable per-CPU interrupt on all the CPU to handle our RX
+        * queue interrupts
+        */
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_enable,
+                                        pp, true);

        /* Register a CPU notifier to handle the case where our CPU
         * might be taken offline.
@@ -3150,6 +3284,106 @@ static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
        return -EOPNOTSUPP;
 }

+static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev)
+{
+       return MVNETA_RSS_LU_TABLE_SIZE;
+}
+
+static int mvneta_ethtool_get_rxnfc(struct net_device *dev,
+                                   struct ethtool_rxnfc *info,
+                                   u32 *rules __always_unused)
+{
+       switch (info->cmd) {
+       case ETHTOOL_GRXRINGS:
+               info->data = rxq_number;
+               return 0;
+       case ETHTOOL_GRXFH:
+               return -EOPNOTSUPP;
+       default:
+               return -EOPNOTSUPP;
+       }
+}
+
+static int mvneta_config_rss(struct mvneta_port *pp)
+{
+       int cpu;
+       u32 val;
+
+       netif_tx_stop_all_queues(pp->dev);
+
+       for_each_online_cpu(cpu)
+               smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
+                                        pp, true);
+
+       /* We have to synchronise on the napi of each CPU */
+       for_each_online_cpu(cpu) {
+               struct mvneta_pcpu_port *pcpu_port =
+                       per_cpu_ptr(pp->ports, cpu);
+
+               napi_synchronize(&pcpu_port->napi);
+               napi_disable(&pcpu_port->napi);
+       }
+
+       pp->rxq_def = pp->indir[0];
+
+       /* Update unicast mapping */
+       mvneta_set_rx_mode(pp->dev);
+
+       /* Update val of portCfg register accordingly with all RxQueue types */
+       val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def);
+       mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+
+       /* Update the elected CPU matching the new rxq_def */
+       mvneta_percpu_elect(pp);
+
+       /* We have to synchronise on the napi of each CPU */
+       for_each_online_cpu(cpu) {
+               struct mvneta_pcpu_port *pcpu_port =
+                       per_cpu_ptr(pp->ports, cpu);
+
+               napi_enable(&pcpu_port->napi);
+       }
+
+       netif_tx_start_all_queues(pp->dev);
+
+       return 0;
+}
+
+static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir,
+                                  const u8 *key, const u8 hfunc)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       /* We require at least one supported parameter to be changed
+        * and no change in any of the unsupported parameters
+        */
+       if (key ||
+           (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+               return -EOPNOTSUPP;
+
+       if (!indir)
+               return 0;
+
+       memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE);
+
+       return mvneta_config_rss(pp);
+}
+
+static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+                                  u8 *hfunc)
+{
+       struct mvneta_port *pp = netdev_priv(dev);
+
+       if (hfunc)
+               *hfunc = ETH_RSS_HASH_TOP;
+
+       if (!indir)
+               return 0;
+
+       memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE);
+
+       return 0;
+}
+
 static const struct net_device_ops mvneta_netdev_ops = {
        .ndo_open            = mvneta_open,
        .ndo_stop            = mvneta_stop,
@@ -3174,6 +3408,10 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
        .get_strings    = mvneta_ethtool_get_strings,
        .get_ethtool_stats = mvneta_ethtool_get_stats,
        .get_sset_count = mvneta_ethtool_get_sset_count,
+       .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size,
+       .get_rxnfc      = mvneta_ethtool_get_rxnfc,
+       .get_rxfh       = mvneta_ethtool_get_rxfh,
+       .set_rxfh       = mvneta_ethtool_set_rxfh,
 };

 /* Initialize hw */
@@ -3363,6 +3601,10 @@ static int mvneta_probe(struct platform_device *pdev)
                                         strcmp(managed, "in-band-status") == 0);
        pp->cpu_notifier.notifier_call = mvneta_percpu_notifier;

+       pp->rxq_def = rxq_def;
+
+       pp->indir[0] = rxq_def;
+
        pp->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(pp->clk)) {
                err = PTR_ERR(pp->clk);
...