Commit 558e35cc authored by Nicolas Ferre's avatar Nicolas Ferre Committed by David S. Miller

net: macb: WoL support for GEM type of Ethernet controller

Adapt the Wake-on-LAN feature to the Cadence GEM Ethernet controller.
This controller has a different register layout and cannot be handled by
the previous code.
We completely disable interrupts on all queues but queue 0.
Handling of WoL interrupt is done in another interrupt handler
positioned depending on the controller version used, just between
suspend() and resume() calls.
This lowers pressure on the generic interrupt hot path by removing the
need to perform two tests for each IRQ: the first to figure out the
controller revision, the second to check whether the WoL bit is set.

Queue management in the suspend()/resume() functions is inspired by an RFC
patch by Harini Katakam <harinik@xilinx.com>, thanks!

Cc: Claudiu Beznea <claudiu.beznea@microchip.com>
Cc: Harini Katakam <harini.katakam@xilinx.com>
Signed-off-by: default avatarNicolas Ferre <nicolas.ferre@microchip.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent a8b7b2d0
...@@ -90,6 +90,7 @@ ...@@ -90,6 +90,7 @@
#define GEM_SA3T 0x009C /* Specific3 Top */ #define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */ #define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */ #define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */ #define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */ #define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */ #define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
...@@ -396,6 +397,8 @@ ...@@ -396,6 +397,8 @@
#define MACB_PDRSFT_SIZE 1 #define MACB_PDRSFT_SIZE 1
#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ #define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */
#define MACB_SRI_SIZE 1 #define MACB_SRI_SIZE 1
#define GEM_WOL_OFFSET 28 /* Enable wake-on-lan interrupt */
#define GEM_WOL_SIZE 1
/* Timer increment fields */ /* Timer increment fields */
#define MACB_TI_CNS_OFFSET 0 #define MACB_TI_CNS_OFFSET 0
......
...@@ -1517,6 +1517,35 @@ static void macb_tx_restart(struct macb_queue *queue) ...@@ -1517,6 +1517,35 @@ static void macb_tx_restart(struct macb_queue *queue)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
} }
/* Minimal interrupt handler installed on queue 0 while a GEM controller is
 * suspended with Wake-on-LAN armed: it acknowledges the WoL event and
 * reports the wakeup to the PM core.  Replaces macb_interrupt() across
 * suspend()/resume() so the normal hot path needs no WoL checks.
 */
static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
struct macb_queue *queue = dev_id;
struct macb *bp = queue->bp;
u32 status;
status = queue_readl(queue, ISR);
/* IRQ is shared: a zero status means the event was for another device. */
if (unlikely(!status))
return IRQ_NONE;
spin_lock(&bp->lock);
if (status & GEM_BIT(WOL)) {
/* Mask the WoL interrupt and clear the WoL enable register so the
 * event is signalled only once per suspend cycle.
 */
queue_writel(queue, IDR, GEM_BIT(WOL));
gem_writel(bp, WOL, 0);
netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
(unsigned int)(queue - bp->queues),
(unsigned long)status);
/* Some controller revisions clear ISR bits by writing them back. */
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, GEM_BIT(WOL));
/* Register this device as the wakeup source with the PM core. */
pm_wakeup_event(&bp->pdev->dev, 0);
}
spin_unlock(&bp->lock);
return IRQ_HANDLED;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id) static irqreturn_t macb_interrupt(int irq, void *dev_id)
{ {
struct macb_queue *queue = dev_id; struct macb_queue *queue = dev_id;
...@@ -3316,6 +3345,8 @@ static const struct ethtool_ops macb_ethtool_ops = { ...@@ -3316,6 +3345,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
static const struct ethtool_ops gem_ethtool_ops = { static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len, .get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs, .get_regs = macb_get_regs,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_ts_info = macb_get_ts_info, .get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats, .get_ethtool_stats = gem_get_ethtool_stats,
...@@ -4567,33 +4598,68 @@ static int __maybe_unused macb_suspend(struct device *dev) ...@@ -4567,33 +4598,68 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues; struct macb_queue *queue = bp->queues;
unsigned long flags; unsigned long flags;
unsigned int q; unsigned int q;
int err;
if (!netif_running(netdev)) if (!netif_running(netdev))
return 0; return 0;
if (bp->wol & MACB_WOL_ENABLED) { if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL)); spin_lock_irqsave(&bp->lock, flags);
macb_writel(bp, WOL, MACB_BIT(MAG)); /* Flush all status bits */
enable_irq_wake(bp->queues[0].irq); macb_writel(bp, TSR, -1);
netif_device_detach(netdev); macb_writel(bp, RSR, -1);
} else {
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues; for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) ++q, ++queue) {
napi_disable(&queue->napi); /* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
if (macb_is_gem(bp)) {
devm_free_irq(dev, bp->queues[0].irq, bp->queues);
err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
gem_writel(bp, WOL, MACB_BIT(MAG));
} else {
queue_writel(bp->queues, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG));
}
spin_unlock_irqrestore(&bp->lock, flags);
enable_irq_wake(bp->queues[0].irq);
}
netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_disable(&queue->napi);
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock(); rtnl_lock();
phylink_stop(bp->phylink); phylink_stop(bp->phylink);
rtnl_unlock(); rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags); spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp); macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags); spin_unlock_irqrestore(&bp->lock, flags);
}
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
if (netdev->hw_features & NETIF_F_NTUPLE) if (netdev->hw_features & NETIF_F_NTUPLE)
bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
}
if (bp->ptp_info) if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev); bp->ptp_info->ptp_remove(netdev);
...@@ -4608,7 +4674,9 @@ static int __maybe_unused macb_resume(struct device *dev) ...@@ -4608,7 +4674,9 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev); struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev); struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues; struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q; unsigned int q;
int err;
if (!netif_running(netdev)) if (!netif_running(netdev))
return 0; return 0;
...@@ -4617,29 +4685,60 @@ static int __maybe_unused macb_resume(struct device *dev) ...@@ -4617,29 +4685,60 @@ static int __maybe_unused macb_resume(struct device *dev)
pm_runtime_force_resume(dev); pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) { if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL)); spin_lock_irqsave(&bp->lock, flags);
macb_writel(bp, WOL, 0); /* Disable WoL */
disable_irq_wake(bp->queues[0].irq); if (macb_is_gem(bp)) {
} else { queue_writel(bp->queues, IDR, GEM_BIT(WOL));
macb_writel(bp, NCR, MACB_BIT(MPE)); gem_writel(bp, WOL, 0);
} else {
if (netdev->hw_features & NETIF_F_NTUPLE) queue_writel(bp->queues, IDR, MACB_BIT(WOL));
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); macb_writel(bp, WOL, 0);
}
/* Clear ISR on queue 0 */
queue_readl(bp->queues, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(bp->queues, ISR, -1);
/* Replace interrupt handler on queue 0 */
devm_free_irq(dev, bp->queues[0].irq, bp->queues);
err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
spin_unlock_irqrestore(&bp->lock, flags);
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) disable_irq_wake(bp->queues[0].irq);
macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
for (q = 0, queue = bp->queues; q < bp->num_queues; /* Now make sure we disable phy before moving
++q, ++queue) * to common restore path
napi_enable(&queue->napi); */
rtnl_lock(); rtnl_lock();
phylink_start(bp->phylink); phylink_stop(bp->phylink);
rtnl_unlock(); rtnl_unlock();
} }
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_enable(&queue->napi);
if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
macb_writel(bp, NCR, MACB_BIT(MPE));
macb_init_hw(bp); macb_init_hw(bp);
macb_set_rx_mode(netdev); macb_set_rx_mode(netdev);
macb_restore_features(bp); macb_restore_features(bp);
rtnl_lock();
phylink_start(bp->phylink);
rtnl_unlock();
netif_device_attach(netdev); netif_device_attach(netdev);
if (bp->ptp_info) if (bp->ptp_info)
bp->ptp_info->ptp_init(netdev); bp->ptp_info->ptp_init(netdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment