Commit 4422b003 authored by Francois Romieu, committed by Jeff Garzik

cxgb: delete non NAPI code from the driver.

Compile-tested only.
Signed-off-by: Francois Romieu <romieu@fr.zoreil.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
parent 29d0a2b0
@@ -2378,14 +2378,6 @@ config CHELSIO_T1_1G
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
-config CHELSIO_T1_NAPI
-	bool "Use Rx Polling (NAPI)"
-	depends on CHELSIO_T1
-	default y
-	help
-	  NAPI is a driver API designed to reduce CPU and interrupt load
-	  when the driver is receiving lots of packets from the card.
 config CHELSIO_T3
 	tristate "Chelsio Communications T3 10Gb Ethernet support"
 	depends on PCI && INET
@@ -1153,9 +1153,7 @@ static int __devinit init_one(struct pci_dev *pdev,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 		netdev->poll_controller = t1_netpoll;
 #endif
-#ifdef CONFIG_CHELSIO_T1_NAPI
 		netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
-#endif
 		SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
 	}
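With the config option gone, netif_napi_add() above is now called unconditionally: it registers t1_poll as the NAPI poll callback with a budget weight of 64. For readers unfamiliar with the API, a minimal sketch of such a poll callback follows. It is illustrative only and not the driver's actual t1_poll(); process_rx_queue() is a hypothetical helper standing in for the driver's real response-queue processing.

/* Illustrative NAPI poll callback (assumes <linux/netdevice.h>). */
static int example_poll(struct napi_struct *napi, int budget)
{
	struct adapter *adapter = container_of(napi, struct adapter, napi);
	int work_done = process_rx_queue(adapter, budget);	/* hypothetical helper */

	if (work_done < budget)
		napi_complete(napi);	/* all work done: leave polled mode */
	return work_done;
}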
@@ -1396,20 +1396,10 @@ static void sge_rx(struct sge *sge, struct freelQ *fl, unsigned int len)
 	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
 		st->vlan_xtract++;
-#ifdef CONFIG_CHELSIO_T1_NAPI
-		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
-					 ntohs(p->vlan));
-#else
-		vlan_hwaccel_rx(skb, adapter->vlan_grp,
-				ntohs(p->vlan));
-#endif
-	} else {
-#ifdef CONFIG_CHELSIO_T1_NAPI
+		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
+					 ntohs(p->vlan));
+	} else
 		netif_receive_skb(skb);
-#else
-		netif_rx(skb);
-#endif
-	}
 }
 /*
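Put together, the '+' side of the hunk above leaves the receive-delivery tail of sge_rx() looking roughly as below (reconstructed from the hunk). Only the NAPI-safe calls remain, since sge_rx() is now always reached from t1_poll() in softirq context, where netif_receive_skb() can hand the packet to the stack directly, whereas the old non-NAPI path had to queue it with netif_rx() from hard-interrupt context.

	/* Reconstructed result of the hunk above, for reference. */
	if (unlikely(adapter->vlan_grp && p->vlan_valid)) {
		st->vlan_xtract++;
		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp,
					 ntohs(p->vlan));
	} else
		netif_receive_skb(skb);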
@@ -1568,7 +1558,6 @@ static inline int responses_pending(const struct adapter *adapter)
 	return (e->GenerationBit == Q->genbit);
 }
-#ifdef CONFIG_CHELSIO_T1_NAPI
 /*
  * A simpler version of process_responses() that handles only pure (i.e.,
  * non data-carrying) responses. Such respones are too light-weight to justify
@@ -1636,9 +1625,6 @@ int t1_poll(struct napi_struct *napi, int budget)
 	return work_done;
 }
-/*
- * NAPI version of the main interrupt handler.
- */
 irqreturn_t t1_interrupt(int irq, void *data)
 {
 	struct adapter *adapter = data;
@@ -1656,7 +1642,8 @@ irqreturn_t t1_interrupt(int irq, void *data)
 			else {
 				/* no data, no NAPI needed */
 				writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
-				napi_enable(&adapter->napi);	/* undo schedule_prep */
+				/* undo schedule_prep */
+				napi_enable(&adapter->napi);
 			}
 		}
 		return IRQ_HANDLED;
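The napi_enable() call above is the counterpart of an earlier napi_schedule_prep(): when the interrupt turns out to carry no data worth polling, the handler has already claimed the NAPI context and must release that claim instead of scheduling the poll. A simplified sketch of that decision is shown below; have_rx_work() is an illustrative stand-in for the driver's real check, and the surrounding locking and error handling are omitted.

	/* Sketch only: have_rx_work() is hypothetical, not a cxgb function. */
	if (napi_schedule_prep(&adapter->napi)) {
		if (have_rx_work(adapter)) {
			__napi_schedule(&adapter->napi);	/* t1_poll() runs from softirq */
		} else {
			/* no data, no NAPI needed */
			writel(sge->respQ.cidx, adapter->regs + A_SG_SLEEPING);
			napi_enable(&adapter->napi);		/* undo schedule_prep */
		}
	}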
@@ -1672,53 +1659,6 @@ irqreturn_t t1_interrupt(int irq, void *data)
 	return IRQ_RETVAL(handled != 0);
 }
-#else
-/*
- * Main interrupt handler, optimized assuming that we took a 'DATA'
- * interrupt.
- *
- * 1. Clear the interrupt
- * 2. Loop while we find valid descriptors and process them; accumulate
- *    information that can be processed after the loop
- * 3. Tell the SGE at which index we stopped processing descriptors
- * 4. Bookkeeping; free TX buffers, ring doorbell if there are any
- *    outstanding TX buffers waiting, replenish RX buffers, potentially
- *    reenable upper layers if they were turned off due to lack of TX
- *    resources which are available again.
- * 5. If we took an interrupt, but no valid respQ descriptors was found we
- *    let the slow_intr_handler run and do error handling.
- */
-irqreturn_t t1_interrupt(int irq, void *cookie)
-{
-	int work_done;
-	struct adapter *adapter = cookie;
-	struct respQ *Q = &adapter->sge->respQ;
-	spin_lock(&adapter->async_lock);
-	writel(F_PL_INTR_SGE_DATA, adapter->regs + A_PL_CAUSE);
-	if (likely(responses_pending(adapter)))
-		work_done = process_responses(adapter, -1);
-	else
-		work_done = t1_slow_intr_handler(adapter);
-	/*
-	 * The unconditional clearing of the PL_CAUSE above may have raced
-	 * with DMA completion and the corresponding generation of a response
-	 * to cause us to miss the resulting data interrupt. The next write
-	 * is also unconditional to recover the missed interrupt and render
-	 * this race harmless.
-	 */
-	writel(Q->cidx, adapter->regs + A_SG_SLEEPING);
-	if (!work_done)
-		adapter->sge->stats.unhandled_irqs++;
-	spin_unlock(&adapter->async_lock);
-	return IRQ_RETVAL(work_done != 0);
-}
-#endif
 /*
  * Enqueues the sk_buff onto the cmdQ[qid] and has hardware fetch it.
  *