Commit 63b66d12 authored by Ron Mercer, committed by Jeff Garzik

qla3xxx: bugfix tx reset after stress conditions.

To Reproduce the Problem:
To reproduce this panic consistently, run an intensive network
application such as 'netperf' and then switch to a different console.
After waiting a couple of seconds, you will see that a tx reset has occurred.

Reason:
Completion interrupts were being re-enabled even when the device was not running.

Solution:
Interrupts are now re-enabled only when we are ready to give up the poll
routine.
Signed-off-by: Ron Mercer <ron.mercer@qlogic.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
parent 0f8ab89e
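
In outline, the fix moves where completion interrupts are re-armed. Below is a
simplified C sketch of the new ordering, using the driver helpers that appear
in the diff that follows; locking, buffer-queue maintenance, budget accounting
and the ISR's status decoding are omitted, so treat it as an illustration
rather than the literal driver code.

/*
 * Sketch only: poll re-arms interrupts just before leaving the poll list,
 * and the ISR disables them only once a poll has actually been scheduled.
 */
static int ql_poll(struct net_device *ndev, int *budget)
{
    struct ql3_adapter *qdev = netdev_priv(ndev);
    struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
    int rx_cleaned = 0, tx_cleaned = 0;

    ql_tx_rx_clean(qdev, &tx_cleaned, &rx_cleaned, min(*budget, ndev->quota));

    if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
        netif_rx_complete(ndev);
        /* Done polling: tell the chip how far the response queue was
         * consumed, and only then re-enable completion interrupts. */
        ql_write_common_reg(qdev,
                    &port_regs->CommonRegs.rspQConsumerIndex,
                    qdev->rsp_consumer_index);
        ql_enable_interrupts(qdev);
        return 0;
    }
    return 1;   /* more work pending; stay on the poll list */
}

static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
{
    struct net_device *ndev = dev_id;
    struct ql3_adapter *qdev = netdev_priv(ndev);

    /* Interrupt-status decoding elided.  Interrupts are disabled only
     * after netif_rx_schedule_prep() succeeds, so they are never left
     * disabled (or re-enabled) underneath a poll that is already running. */
    if (likely(netif_rx_schedule_prep(ndev))) {
        ql_disable_interrupts(qdev);
        __netif_rx_schedule(ndev);
    }
    return IRQ_HANDLED;
}

The effect is that ql_enable_interrupts() can no longer race with a poll that
is still consuming the response queue. The actual change follows.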
@@ -1921,10 +1921,11 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
     struct net_rsp_iocb *net_rsp;
     struct net_device *ndev = qdev->ndev;
     unsigned long hw_flags;
+    int work_done = 0;
 
     /* While there are entries in the completion queue. */
     while ((cpu_to_le32(*(qdev->prsp_producer_index)) !=
-        qdev->rsp_consumer_index) && (*rx_cleaned < work_to_do)) {
+        qdev->rsp_consumer_index) && (work_done < work_to_do)) {
 
         net_rsp = qdev->rsp_current;
         switch (net_rsp->opcode) {
@@ -1975,37 +1976,41 @@ static int ql_tx_rx_clean(struct ql3_adapter *qdev,
         } else {
             qdev->rsp_current++;
         }
+
+        work_done = *tx_cleaned + *rx_cleaned;
     }
 
-    spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+    if(work_done) {
+        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
 
-    ql_update_lrg_bufq_prod_index(qdev);
+        ql_update_lrg_bufq_prod_index(qdev);
 
-    if (qdev->small_buf_release_cnt >= 16) {
-        while (qdev->small_buf_release_cnt >= 16) {
-            qdev->small_buf_q_producer_index++;
+        if (qdev->small_buf_release_cnt >= 16) {
+            while (qdev->small_buf_release_cnt >= 16) {
+                qdev->small_buf_q_producer_index++;
 
-            if (qdev->small_buf_q_producer_index ==
-                NUM_SBUFQ_ENTRIES)
-                qdev->small_buf_q_producer_index = 0;
-            qdev->small_buf_release_cnt -= 8;
-        }
+                if (qdev->small_buf_q_producer_index ==
+                    NUM_SBUFQ_ENTRIES)
+                    qdev->small_buf_q_producer_index = 0;
+                qdev->small_buf_release_cnt -= 8;
+            }
 
-        ql_write_common_reg(qdev,
-                    &port_regs->CommonRegs.
-                    rxSmallQProducerIndex,
-                    qdev->small_buf_q_producer_index);
-    }
+            wmb();
+            ql_write_common_reg(qdev,
                        &port_regs->CommonRegs.
                        rxSmallQProducerIndex,
                        qdev->small_buf_q_producer_index);
+        }
 
-    ql_write_common_reg(qdev,
-                &port_regs->CommonRegs.rspQConsumerIndex,
-                qdev->rsp_consumer_index);
-    spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
 
-    if (unlikely(netif_queue_stopped(qdev->ndev))) {
-        if (netif_queue_stopped(qdev->ndev) &&
-            (atomic_read(&qdev->tx_count) > (NUM_REQ_Q_ENTRIES / 4)))
-            netif_wake_queue(qdev->ndev);
-    }
+        if (unlikely(netif_queue_stopped(qdev->ndev))) {
+            if (netif_queue_stopped(qdev->ndev) &&
+                (atomic_read(&qdev->tx_count) >
+                 (NUM_REQ_Q_ENTRIES / 4)))
+                netif_wake_queue(qdev->ndev);
+        }
+    }
 
     return *tx_cleaned + *rx_cleaned;
@@ -2016,6 +2021,8 @@ static int ql_poll(struct net_device *ndev, int *budget)
     struct ql3_adapter *qdev = netdev_priv(ndev);
     int work_to_do = min(*budget, ndev->quota);
     int rx_cleaned = 0, tx_cleaned = 0;
+    unsigned long hw_flags;
+    struct ql3xxx_port_registers __iomem *port_regs = qdev->mem_map_registers;
 
     if (!netif_carrier_ok(ndev))
         goto quit_polling;
@@ -2027,6 +2034,13 @@ static int ql_poll(struct net_device *ndev, int *budget)
     if ((!tx_cleaned && !rx_cleaned) || !netif_running(ndev)) {
 quit_polling:
         netif_rx_complete(ndev);
+
+        spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+        ql_write_common_reg(qdev,
+                    &port_regs->CommonRegs.rspQConsumerIndex,
+                    qdev->rsp_consumer_index);
+        spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+
         ql_enable_interrupts(qdev);
         return 0;
     }
@@ -2079,11 +2093,10 @@ static irqreturn_t ql3xxx_isr(int irq, void *dev_id)
         queue_delayed_work(qdev->workqueue, &qdev->reset_work, 0);
         spin_unlock(&qdev->adapter_lock);
     } else if (value & ISP_IMR_DISABLE_CMPL_INT) {
-        ql_disable_interrupts(qdev);
-        if (likely(netif_rx_schedule_prep(ndev)))
+        if (likely(netif_rx_schedule_prep(ndev))) {
+            ql_disable_interrupts(qdev);
             __netif_rx_schedule(ndev);
-        else
-            ql_enable_interrupts(qdev);
+        }
     } else {
         return IRQ_NONE;
     }