Commit ea708584 authored by David S. Miller

Merge branch 'mvpp2-fixes'

Marcin Wojtas says:

====================
Fixes for the network driver of Marvell Armada 375 SoC

This is a set of three patches that fix long-standing problems present since
the initial support for the Armada 375 network controller.

Due to an inappropriate approach to handling per-CPU processing of sent
packets on the TX path, the driver suffered from numerous problems, such as
RCU stalls. These have been fixed; the details can be found in the commit
logs. The patches were tested intensively on top of v4.2-rc5.

I'm looking forward to any comments or remarks.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents fe1e1876 edc660fa
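The central change in the series is to stop reclaiming transmitted packets via on_each_cpu() calls from the NAPI poll routine, and instead let each CPU reclaim its own TX descriptors, falling back to a pinned hrtimer that schedules a tasklet when work is left over. The sketch below is a minimal, self-contained illustration of that hrtimer-plus-tasklet deferral pattern using the v4.2-era kernel API; it is not the driver code itself, and the demo_* names and DEMO_PERIOD_NS constant are hypothetical stand-ins.

/* Minimal sketch (not driver code): defer per-CPU TX-done work to a tasklet
 * armed by a pinned hrtimer. All demo_* identifiers are hypothetical.
 */
#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/interrupt.h>

#define DEMO_PERIOD_NS 1000000UL        /* re-check for leftover work after ~1 ms */

struct demo_pcpu {
        struct hrtimer timer;           /* fires in hardirq context */
        struct tasklet_struct tasklet;  /* does the real work in softirq context */
        bool scheduled;                 /* timer already armed on this CPU? */
        unsigned int pending;           /* stand-in for unreclaimed descriptors */
};

/* Tasklet body: reclaim completed descriptors; if some are still pending,
 * re-arm the timer so the leftover work is retried later instead of lost.
 */
static void demo_tx_work(unsigned long data)
{
        struct demo_pcpu *pcpu = (struct demo_pcpu *)data;

        pcpu->scheduled = false;
        /* ... free completed TX descriptors, decrement pcpu->pending ... */
        if (pcpu->pending) {
                pcpu->scheduled = true;
                hrtimer_start(&pcpu->timer, ktime_set(0, DEMO_PERIOD_NS),
                              HRTIMER_MODE_REL_PINNED);
        }
}

/* hrtimer callback: runs in interrupt context, so only kick the tasklet. */
static enum hrtimer_restart demo_timer_cb(struct hrtimer *t)
{
        struct demo_pcpu *pcpu = container_of(t, struct demo_pcpu, timer);

        tasklet_schedule(&pcpu->tasklet);
        return HRTIMER_NORESTART;
}

/* Per-CPU setup, mirroring what a probe routine would do for each CPU. */
static void demo_pcpu_init(struct demo_pcpu *pcpu)
{
        hrtimer_init(&pcpu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
        pcpu->timer.function = demo_timer_cb;
        pcpu->scheduled = false;
        pcpu->pending = 0;
        tasklet_init(&pcpu->tasklet, demo_tx_work, (unsigned long)pcpu);
}

The pinned, relative timer keeps the deferred work on the CPU that armed it, which avoids the cross-CPU on_each_cpu() synchronization the cover letter identifies as the source of the RCU stalls.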
@@ -27,6 +27,8 @@
 #include <linux/of_address.h>
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/ktime.h>
 #include <uapi/linux/ppp_defs.h>
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -299,6 +301,7 @@
 /* Coalescing */
 #define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
 #define MVPP2_RX_COAL_PKTS             32
 #define MVPP2_RX_COAL_USEC             100
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
         u64     tx_bytes;
 };
 
+/* Per-CPU port control */
+struct mvpp2_port_pcpu {
+        struct hrtimer tx_done_timer;
+        bool timer_scheduled;
+        /* Tasklet for egress finalization */
+        struct tasklet_struct tx_done_tasklet;
+};
+
 struct mvpp2_port {
         u8 id;
@@ -679,6 +690,9 @@ struct mvpp2_port {
         u32 pending_cause_rx;
         struct napi_struct napi;
 
+        /* Per-CPU port control */
+        struct mvpp2_port_pcpu __percpu *pcpu;
+
         /* Flags */
         unsigned long flags;
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
         /* Array of transmitted skb */
         struct sk_buff **tx_skb;
 
+        /* Array of transmitted buffers' physical addresses */
+        dma_addr_t *tx_buffs;
+
         /* Index of last TX DMA descriptor that was inserted */
         int txq_put_index;
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
         /* Occupied buffers indicator */
         atomic_t in_use;
         int in_use_thresh;
-
-        spinlock_t lock;
 };
 
 struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
 }
 
 static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
-                              struct sk_buff *skb)
+                              struct sk_buff *skb,
+                              struct mvpp2_tx_desc *tx_desc)
 {
         txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+        if (skb)
+                txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
+                                                  tx_desc->buf_phys_addr;
         txq_pcpu->txq_put_index++;
         if (txq_pcpu->txq_put_index == txq_pcpu->size)
                 txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
         bm_pool->pkt_size = 0;
         bm_pool->buf_num = 0;
         atomic_set(&bm_pool->in_use, 0);
-        spin_lock_init(&bm_pool->lock);
 
         return 0;
 }
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
 mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                   int pkt_size)
 {
-        unsigned long flags = 0;
         struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
         int num;
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                 return NULL;
         }
 
-        spin_lock_irqsave(&new_pool->lock, flags);
-
         if (new_pool->type == MVPP2_BM_FREE)
                 new_pool->type = type;
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
                 if (num != pkts_num) {
                         WARN(1, "pool %d: %d of %d allocated\n",
                              new_pool->id, num, pkts_num);
-                        /* We need to undo the bufs_add() allocations */
-                        spin_unlock_irqrestore(&new_pool->lock, flags);
                         return NULL;
                 }
         }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
         mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
                                   MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
 
-        spin_unlock_irqrestore(&new_pool->lock, flags);
-
         return new_pool;
 }
 
 /* Initialize pools for swf */
 static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
 {
-        unsigned long flags = 0;
         int rxq;
 
         if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                 if (!port->pool_long)
                         return -ENOMEM;
 
-                spin_lock_irqsave(&port->pool_long->lock, flags);
                 port->pool_long->port_map |= (1 << port->id);
-                spin_unlock_irqrestore(&port->pool_long->lock, flags);
 
                 for (rxq = 0; rxq < rxq_number; rxq++)
                         mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
                 if (!port->pool_short)
                         return -ENOMEM;
 
-                spin_lock_irqsave(&port->pool_short->lock, flags);
                 port->pool_short->port_map |= (1 << port->id);
-                spin_unlock_irqrestore(&port->pool_short->lock, flags);
 
                 for (rxq = 0; rxq < rxq_number; rxq++)
                         mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
         mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
                     (MVPP2_CAUSE_MISC_SUM_MASK |
-                     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
                      MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
 }
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
         rxq->time_coal = usec;
 }
 
-/* Set threshold for TX_DONE pkts coalescing */
-static void mvpp2_tx_done_pkts_coal_set(void *arg)
-{
-        struct mvpp2_port *port = arg;
-        int queue;
-        u32 val;
-
-        for (queue = 0; queue < txq_number; queue++) {
-                struct mvpp2_tx_queue *txq = port->txqs[queue];
-
-                val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
-                      MVPP2_TRANSMITTED_THRESH_MASK;
-                mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
-                mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
-        }
-}
-
 /* Free Tx queue skbuffs */
 static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                                 struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
         int i;
 
         for (i = 0; i < num; i++) {
-                struct mvpp2_tx_desc *tx_desc = txq->descs +
-                                                txq_pcpu->txq_get_index;
+                dma_addr_t buf_phys_addr =
+                        txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
                 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
 
                 mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
                 if (!skb)
                         continue;
 
-                dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
-                                 tx_desc->data_size, DMA_TO_DEVICE);
+                dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
+                                 skb_headlen(skb), DMA_TO_DEVICE);
                 dev_kfree_skb_any(skb);
         }
 }
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
 static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
                                                          u32 cause)
 {
-        int queue = fls(cause >> 16) - 1;
+        int queue = fls(cause) - 1;
 
         return port->txqs[queue];
 }
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
                 netif_tx_wake_queue(nq);
 }
 
+static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
+{
+        struct mvpp2_tx_queue *txq;
+        struct mvpp2_txq_pcpu *txq_pcpu;
+        unsigned int tx_todo = 0;
+
+        while (cause) {
+                txq = mvpp2_get_tx_queue(port, cause);
+                if (!txq)
+                        break;
+
+                txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+                if (txq_pcpu->count) {
+                        mvpp2_txq_done(port, txq, txq_pcpu);
+                        tx_todo += txq_pcpu->count;
+                }
+
+                cause &= ~(1 << txq->log_id);
+        }
+        return tx_todo;
+}
+
 /* Rx/Tx queue initialization/cleanup methods */
 
 /* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
                 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
                                            sizeof(*txq_pcpu->tx_skb),
                                            GFP_KERNEL);
-                if (!txq_pcpu->tx_skb) {
-                        dma_free_coherent(port->dev->dev.parent,
-                                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
-                                          txq->descs, txq->descs_phys);
-                        return -ENOMEM;
-                }
+                if (!txq_pcpu->tx_skb)
+                        goto error;
+
+                txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
+                                             sizeof(dma_addr_t), GFP_KERNEL);
+                if (!txq_pcpu->tx_buffs)
+                        goto error;
 
                 txq_pcpu->count = 0;
                 txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
         }
 
         return 0;
+
+error:
+        for_each_present_cpu(cpu) {
+                txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+                kfree(txq_pcpu->tx_skb);
+                kfree(txq_pcpu->tx_buffs);
+        }
+
+        dma_free_coherent(port->dev->dev.parent,
+                          txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                          txq->descs, txq->descs_phys);
+
+        return -ENOMEM;
 }
 
 /* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
         for_each_present_cpu(cpu) {
                 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
                 kfree(txq_pcpu->tx_skb);
+                kfree(txq_pcpu->tx_buffs);
         }
 
         if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
                         goto err_cleanup;
         }
 
-        on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
         on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
         return 0;
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
         }
 }
 
+static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+{
+        ktime_t interval;
+
+        if (!port_pcpu->timer_scheduled) {
+                port_pcpu->timer_scheduled = true;
+                interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+                hrtimer_start(&port_pcpu->tx_done_timer, interval,
+                              HRTIMER_MODE_REL_PINNED);
+        }
+}
+
+static void mvpp2_tx_proc_cb(unsigned long data)
+{
+        struct net_device *dev = (struct net_device *)data;
+        struct mvpp2_port *port = netdev_priv(dev);
+        struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+        unsigned int tx_todo, cause;
+
+        if (!netif_running(dev))
+                return;
+        port_pcpu->timer_scheduled = false;
+
+        /* Process all the Tx queues */
+        cause = (1 << txq_number) - 1;
+        tx_todo = mvpp2_tx_done(port, cause);
+
+        /* Set the timer in case not all the packets were processed */
+        if (tx_todo)
+                mvpp2_timer_set(port_pcpu);
+}
+
+static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+{
+        struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+                                                         struct mvpp2_port_pcpu,
+                                                         tx_done_timer);
+
+        tasklet_schedule(&port_pcpu->tx_done_tasklet);
+
+        return HRTIMER_NORESTART;
+}
+
 /* Main RX/TX processing routines */
 
 /* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
                 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
                         /* Last descriptor */
                         tx_desc->command = MVPP2_TXD_L_DESC;
-                        mvpp2_txq_inc_put(txq_pcpu, skb);
+                        mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
                 } else {
                         /* Descriptor in the middle: Not First, Not Last */
                         tx_desc->command = 0;
-                        mvpp2_txq_inc_put(txq_pcpu, NULL);
+                        mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
                 }
         }
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                 /* First and Last descriptor */
                 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
                 tx_desc->command = tx_cmd;
-                mvpp2_txq_inc_put(txq_pcpu, skb);
+                mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
         } else {
                 /* First but not Last */
                 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
                 tx_desc->command = tx_cmd;
-                mvpp2_txq_inc_put(txq_pcpu, NULL);
+                mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
 
                 /* Continue with other skb fragments */
                 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
                 dev_kfree_skb_any(skb);
         }
 
+        /* Finalize TX processing */
+        if (txq_pcpu->count >= txq->done_pkts_coal)
+                mvpp2_txq_done(port, txq, txq_pcpu);
+
+        /* Set the timer in case not all frags were processed */
+        if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
+                struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+                mvpp2_timer_set(port_pcpu);
+        }
+
         return NETDEV_TX_OK;
 }
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
                 netdev_err(dev, "tx fifo underrun error\n");
 }
 
-static void mvpp2_txq_done_percpu(void *arg)
+static int mvpp2_poll(struct napi_struct *napi, int budget)
 {
-        struct mvpp2_port *port = arg;
-        u32 cause_rx_tx, cause_tx, cause_misc;
+        u32 cause_rx_tx, cause_rx, cause_misc;
+        int rx_done = 0;
+        struct mvpp2_port *port = netdev_priv(napi->dev);
 
         /* Rx/Tx cause register
          *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
          */
         cause_rx_tx = mvpp2_read(port->priv,
                                  MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
-        cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+        cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
         cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
 
         if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
                            cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
         }
 
-        /* Release TX descriptors */
-        if (cause_tx) {
-                struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
-                struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
-
-                if (txq_pcpu->count)
-                        mvpp2_txq_done(port, txq, txq_pcpu);
-        }
-}
-
-static int mvpp2_poll(struct napi_struct *napi, int budget)
-{
-        u32 cause_rx_tx, cause_rx;
-        int rx_done = 0;
-        struct mvpp2_port *port = netdev_priv(napi->dev);
-
-        on_each_cpu(mvpp2_txq_done_percpu, port, 1);
-
-        cause_rx_tx = mvpp2_read(port->priv,
-                                 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
         cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
 
         /* Process RX packets */
@@ -5561,6 +5621,8 @@ static int mvpp2_open(struct net_device *dev)
 static int mvpp2_stop(struct net_device *dev)
 {
         struct mvpp2_port *port = netdev_priv(dev);
+        struct mvpp2_port_pcpu *port_pcpu;
+        int cpu;
 
         mvpp2_stop_dev(port);
         mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
         on_each_cpu(mvpp2_interrupts_mask, port, 1);
         free_irq(port->irq, port);
 
+        for_each_present_cpu(cpu) {
+                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+                hrtimer_cancel(&port_pcpu->tx_done_timer);
+                port_pcpu->timer_scheduled = false;
+                tasklet_kill(&port_pcpu->tx_done_tasklet);
+        }
+
         mvpp2_cleanup_rxqs(port);
         mvpp2_cleanup_txqs(port);
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
                 txq->done_pkts_coal = c->tx_max_coalesced_frames;
         }
 
-        on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
         return 0;
 }
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
 {
         struct device_node *phy_node;
         struct mvpp2_port *port;
+        struct mvpp2_port_pcpu *port_pcpu;
         struct net_device *dev;
         struct resource *res;
         const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
         int features;
         int phy_mode;
         int priv_common_regs_num = 2;
-        int err, i;
+        int err, i, cpu;
 
         dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
                                  rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
         }
 
         mvpp2_port_power_up(port);
 
+        port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
+        if (!port->pcpu) {
+                err = -ENOMEM;
+                goto err_free_txq_pcpu;
+        }
+
+        for_each_present_cpu(cpu) {
+                port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+                hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+                             HRTIMER_MODE_REL_PINNED);
+                port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+                port_pcpu->timer_scheduled = false;
+
+                tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
+                             (unsigned long)dev);
+        }
+
         netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
         features = NETIF_F_SG | NETIF_F_IP_CSUM;
         dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
         err = register_netdev(dev);
         if (err < 0) {
                 dev_err(&pdev->dev, "failed to register netdev\n");
-                goto err_free_txq_pcpu;
+                goto err_free_port_pcpu;
         }
         netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
         priv->port_list[id] = port;
         return 0;
 
+err_free_port_pcpu:
+        free_percpu(port->pcpu);
 err_free_txq_pcpu:
         for (i = 0; i < txq_number; i++)
                 free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
         int i;
 
         unregister_netdev(port->dev);
+        free_percpu(port->pcpu);
         free_percpu(port->stats);
         for (i = 0; i < txq_number; i++)
                 free_percpu(port->txqs[i]->pcpu);