Commit c637dbce authored by David S. Miller

Merge branch 'tg3-net'

Prashant Sreedharan says:

====================
tg3: synchronize_irq() should be called without taking locks

v2: Added Reported-by, Tested-by fields and reference to the thread that
    reported the problem

This series addresses the problem reported by Peter Hurley in mail thread
https://lkml.org/lkml/2015/1/12/1082
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b0d11b42 932f19de
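
The core change, applied to both tg3_irq_quiesce() and tg3_chip_reset() in the diff below, is to set the quiesce flag, release tp->lock, call synchronize_irq() for every vector, and only then re-acquire the lock; synchronize_irq() can sleep (for example while waiting for threaded handlers to finish), so it must not be called with a spinlock held. Below is a minimal sketch of that pattern using a hypothetical struct mydev with illustrative field names, not the real tg3 structures:

/* Minimal sketch of the "drop the lock around synchronize_irq()" pattern.
 * struct mydev, mydev_irq_quiesce() and the field names are hypothetical,
 * for illustration only.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct mydev {
	spinlock_t lock;
	int irq_sync;		/* set while IRQ activity is being quiesced */
	unsigned int irq_vec;	/* vector obtained from request_irq()       */
};

static void mydev_irq_quiesce(struct mydev *dev)
	__releases(dev->lock)
	__acquires(dev->lock)
{
	/* Publish the flag first, while still holding dev->lock, so paths
	 * that later take the lock (timers, reset work, ...) see it and
	 * back off.
	 */
	dev->irq_sync = 1;
	smp_mb();

	/* synchronize_irq() may sleep while it waits for in-flight
	 * handlers, and those handlers may want dev->lock themselves,
	 * so the lock must be dropped across the call.
	 */
	spin_unlock_bh(&dev->lock);
	synchronize_irq(dev->irq_vec);
	spin_lock_bh(&dev->lock);
}
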
drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
 }

 static void tg3_irq_quiesce(struct tg3 *tp)
+	__releases(tp->lock)
+	__acquires(tp->lock)
 {
 	int i;
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
 	tp->irq_sync = 1;
 	smp_mb();

+	spin_unlock_bh(&tp->lock);
+
 	for (i = 0; i < tp->irq_cnt; i++)
 		synchronize_irq(tp->napi[i].irq_vec);
+
+	spin_lock_bh(&tp->lock);
 }

 /* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)

 /* tp->lock is held. */
 static int tg3_chip_reset(struct tg3 *tp)
+	__releases(tp->lock)
+	__acquires(tp->lock)
 {
 	u32 val;
 	void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
 	}

 	smp_mb();
+
+	tg3_full_unlock(tp);
+
 	for (i = 0; i < tp->irq_cnt; i++)
 		synchronize_irq(tp->napi[i].irq_vec);

+	tg3_full_lock(tp, 0);
+
 	if (tg3_asic_rev(tp) == ASIC_REV_57780) {
 		val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
 		tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
 {
 	struct tg3 *tp = (struct tg3 *) __opaque;

-	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
-		goto restart_timer;
-
 	spin_lock(&tp->lock);

+	if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+		spin_unlock(&tp->lock);
+		goto restart_timer;
+	}
+
 	if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
 	    tg3_flag(tp, 57765_CLASS))
 		tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
 	struct tg3 *tp = container_of(work, struct tg3, reset_task);
 	int err;

+	rtnl_lock();
 	tg3_full_lock(tp, 0);

 	if (!netif_running(tp->dev)) {
 		tg3_flag_clear(tp, RESET_TASK_PENDING);
 		tg3_full_unlock(tp);
+		rtnl_unlock();
 		return;
 	}
@@ -11138,6 +11154,7 @@ static void tg3_reset_task(struct work_struct *work)
 	tg3_phy_start(tp);

 	tg3_flag_clear(tp, RESET_TASK_PENDING);
+	rtnl_unlock();
 }

 static int tg3_request_irq(struct tg3 *tp, int irq_num)
...
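
The tg3_timer() hunk is the other half of the same fix: once tg3_irq_quiesce() may drop tp->lock while it waits, the timer has to check irq_sync (and RESET_TASK_PENDING) under the lock rather than before taking it, or it could slip in during the quiesce window. Because the flag is set before the lock is released, any timer callback that acquires the lock afterwards is guaranteed to observe it and bail out. A sketch of that check, continuing the hypothetical struct mydev above and assuming it also carries a struct timer_list timer member (again illustrative, not tg3 code):

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Assumes struct mydev from the earlier sketch, extended with:
 *	struct timer_list timer;
 * Old-style (unsigned long) timer callback, as used by the kernel of
 * this era and by tg3_timer() in the diff above.
 */
static void mydev_timer(unsigned long opaque)
{
	struct mydev *dev = (struct mydev *)opaque;

	spin_lock(&dev->lock);

	/* Checked under the lock: if the quiesce path set irq_sync before
	 * releasing dev->lock, it is visible here and we back off.
	 */
	if (dev->irq_sync) {
		spin_unlock(&dev->lock);
		goto restart_timer;
	}

	/* ... normal periodic work, protected by dev->lock ... */

	spin_unlock(&dev->lock);
restart_timer:
	mod_timer(&dev->timer, jiffies + HZ);
}

The remaining hunks, which wrap tg3_reset_task() in rtnl_lock()/rtnl_unlock(), serialize the reset work with paths that already run under the RTNL lock (such as device close) before the work re-checks netif_running().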