Commit 5226b791 authored by Eric Dumazet, committed by David S. Miller

cxgb4: get rid of custom busy poll code

In linux-4.5, busy polling was implemented in the core
NAPI stack, meaning that all custom implementations can
be removed from drivers.

Not only do we remove a lot of code, we also remove one
spin_lock() from the driver fast path.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 362108b5
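
For context, here is a minimal sketch (not part of this commit, and not cxgb4 code) of what a driver is left with once busy polling lives in the core stack: it registers an ordinary NAPI context, and the core busy-poll loop (sk_busy_loop(), which since v4.5 drives the driver's regular poll callback) does the rest, so no driver-private bpoll_lock state or .ndo_busy_poll hook is needed. All mydrv_* names below are hypothetical, and netif_napi_add() is shown with the four-argument signature used by kernels of this era.

/* Hypothetical driver sketch: busy polling is handled entirely by the
 * core NAPI stack, which re-uses the normal poll callback below.
 */
#include <linux/netdevice.h>

struct mydrv_rx_queue {			/* hypothetical per-queue state */
	struct napi_struct napi;
	struct net_device *netdev;
};

/* Hypothetical helper: a real driver would reap up to @budget completed
 * Rx descriptors from the queue's ring here.
 */
static int mydrv_process_rx(struct mydrv_rx_queue *q, int budget)
{
	return 0;
}

static int mydrv_napi_poll(struct napi_struct *napi, int budget)
{
	struct mydrv_rx_queue *q = container_of(napi, struct mydrv_rx_queue, napi);
	int work_done = mydrv_process_rx(q, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-arm the queue's interrupt before returning */
	}
	return work_done;
}

static void mydrv_rx_queue_init(struct mydrv_rx_queue *q)
{
	/* Since v4.5, netif_napi_add() also hashes the NAPI context
	 * (napi_hash_add()), which is what exposes the queue to the
	 * core busy-poll path; nothing busy-poll-specific remains here.
	 */
	netif_napi_add(q->netdev, &q->napi, mydrv_napi_poll, NAPI_POLL_WEIGHT);
	napi_enable(&q->napi);
}

Compare this with the cxgb_poll_lock_*() helpers removed below, which existed only to arbitrate between the NAPI handler and the old .ndo_busy_poll path.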
@@ -586,22 +586,6 @@ struct sge_rspq { /* state for an SGE response queue */
 	rspq_handler_t handler;
 	rspq_flush_handler_t flush_handler;
 	struct t4_lro_mgr lro_mgr;
-#ifdef CONFIG_NET_RX_BUSY_POLL
-#define CXGB_POLL_STATE_IDLE		0
-#define CXGB_POLL_STATE_NAPI		BIT(0) /* NAPI owns this poll */
-#define CXGB_POLL_STATE_POLL		BIT(1) /* poll owns this poll */
-#define CXGB_POLL_STATE_NAPI_YIELD	BIT(2) /* NAPI yielded this poll */
-#define CXGB_POLL_STATE_POLL_YIELD	BIT(3) /* poll yielded this poll */
-#define CXGB_POLL_YIELD		(CXGB_POLL_STATE_NAPI_YIELD | \
-				 CXGB_POLL_STATE_POLL_YIELD)
-#define CXGB_POLL_LOCKED	(CXGB_POLL_STATE_NAPI | \
-				 CXGB_POLL_STATE_POLL)
-#define CXGB_POLL_USER_PEND	(CXGB_POLL_STATE_POLL | \
-				 CXGB_POLL_STATE_POLL_YIELD)
-
-	unsigned int bpoll_state;
-	spinlock_t bpoll_lock;		/* lock for busy poll */
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 struct sge_eth_stats { /* Ethernet queue statistics */
@@ -1173,102 +1157,6 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
 	return netdev2pinfo(dev)->adapter;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-	spin_lock_init(&q->bpoll_lock);
-	q->bpoll_state = CXGB_POLL_STATE_IDLE;
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
-	bool rc = true;
-
-	spin_lock(&q->bpoll_lock);
-	if (q->bpoll_state & CXGB_POLL_LOCKED) {
-		q->bpoll_state |= CXGB_POLL_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		q->bpoll_state = CXGB_POLL_STATE_NAPI;
-	}
-	spin_unlock(&q->bpoll_lock);
-	return rc;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
-	bool rc = false;
-
-	spin_lock(&q->bpoll_lock);
-	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
-		rc = true;
-	q->bpoll_state = CXGB_POLL_STATE_IDLE;
-	spin_unlock(&q->bpoll_lock);
-	return rc;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
-	bool rc = true;
-
-	spin_lock_bh(&q->bpoll_lock);
-	if (q->bpoll_state & CXGB_POLL_LOCKED) {
-		q->bpoll_state |= CXGB_POLL_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		q->bpoll_state |= CXGB_POLL_STATE_POLL;
-	}
-	spin_unlock_bh(&q->bpoll_lock);
-	return rc;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
-	bool rc = false;
-
-	spin_lock_bh(&q->bpoll_lock);
-	if (q->bpoll_state & CXGB_POLL_STATE_POLL_YIELD)
-		rc = true;
-	q->bpoll_state = CXGB_POLL_STATE_IDLE;
-	spin_unlock_bh(&q->bpoll_lock);
-	return rc;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
-	return q->bpoll_state & CXGB_POLL_USER_PEND;
-}
-
-#else
-static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q)
-{
-}
-
-static inline bool cxgb_poll_lock_napi(struct sge_rspq *q)
-{
-	return true;
-}
-
-static inline bool cxgb_poll_unlock_napi(struct sge_rspq *q)
-{
-	return false;
-}
-
-static inline bool cxgb_poll_lock_poll(struct sge_rspq *q)
-{
-	return false;
-}
-
-static inline bool cxgb_poll_unlock_poll(struct sge_rspq *q)
-{
-	return false;
-}
-
-static inline bool cxgb_poll_busy_polling(struct sge_rspq *q)
-{
-	return false;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /* Return a version number to identify the type of adapter. The scheme is:
  * - bits 0..9: chip version
  * - bits 10..15: chip revision
@@ -1325,7 +1213,6 @@ irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
 int t4_sge_init(struct adapter *adap);
 void t4_sge_start(struct adapter *adap);
 void t4_sge_stop(struct adapter *adap);
-int cxgb_busy_poll(struct napi_struct *napi);
 void cxgb4_set_ethtool_ops(struct net_device *netdev);
 int cxgb4_write_rss(const struct port_info *pi, const u16 *queues);
 extern int dbfifo_int_thresh;
@@ -744,14 +744,8 @@ static void quiesce_rx(struct adapter *adap)
 	for (i = 0; i < adap->sge.ingr_sz; i++) {
 		struct sge_rspq *q = adap->sge.ingr_map[i];
 
-		if (q && q->handler) {
+		if (q && q->handler)
 			napi_disable(&q->napi);
-			local_bh_disable();
-			while (!cxgb_poll_lock_napi(q))
-				mdelay(1);
-			local_bh_enable();
-		}
 	}
 }
@@ -782,10 +776,9 @@ static void enable_rx(struct adapter *adap)
 		if (!q)
 			continue;
 
-		if (q->handler) {
-			cxgb_busy_poll_init_lock(q);
+		if (q->handler)
 			napi_enable(&q->napi);
-		}
+
 		/* 0-increment GTS to start the timer and enable interrupts */
 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
 			     SEINTARM_V(q->intr_params) |
@@ -2763,9 +2756,6 @@ static const struct net_device_ops cxgb4_netdev_ops = {
 	.ndo_fcoe_enable = cxgb_fcoe_enable,
 	.ndo_fcoe_disable = cxgb_fcoe_disable,
 #endif /* CONFIG_CHELSIO_T4_FCOE */
-#ifdef CONFIG_NET_RX_BUSY_POLL
-	.ndo_busy_poll = cxgb_busy_poll,
-#endif
 	.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
 	.ndo_setup_tc = cxgb_setup_tc,
 };
@@ -408,10 +408,9 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
 	if (!q)
 		return;
 
-	if (q->handler) {
-		cxgb_busy_poll_init_lock(q);
+	if (q->handler)
 		napi_enable(&q->napi);
-	}
+
 	/* 0-increment GTS to start the timer and enable interrupts */
 	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS_A),
 		     SEINTARM_V(q->intr_params) |
@@ -420,13 +419,8 @@ static void enable_rx(struct adapter *adap, struct sge_rspq *q)
 
 static void quiesce_rx(struct adapter *adap, struct sge_rspq *q)
 {
-	if (q && q->handler) {
+	if (q && q->handler)
 		napi_disable(&q->napi);
-		local_bh_disable();
-		while (!cxgb_poll_lock_napi(q))
-			mdelay(1);
-		local_bh_enable();
-	}
 }
 
 static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
@@ -43,9 +43,7 @@
 #include <linux/export.h>
 #include <net/ipv6.h>
 #include <net/tcp.h>
-#ifdef CONFIG_NET_RX_BUSY_POLL
 #include <net/busy_poll.h>
-#endif /* CONFIG_NET_RX_BUSY_POLL */
 #ifdef CONFIG_CHELSIO_T4_FCOE
 #include <scsi/fc/fc_fcoe.h>
 #endif /* CONFIG_CHELSIO_T4_FCOE */
@@ -2059,7 +2057,6 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
 	csum_ok = pkt->csum_calc && !err_vec &&
 		  (q->netdev->features & NETIF_F_RXCSUM);
 	if ((pkt->l2info & htonl(RXF_TCP_F)) &&
-	    !(cxgb_poll_busy_polling(q)) &&
 	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
 		do_gro(rxq, si, pkt);
 		return 0;
@@ -2290,38 +2287,6 @@ static int process_responses(struct sge_rspq *q, int budget)
 	return budget - budget_left;
 }
 
-#ifdef CONFIG_NET_RX_BUSY_POLL
-int cxgb_busy_poll(struct napi_struct *napi)
-{
-	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
-	unsigned int params, work_done;
-	u32 val;
-
-	if (!cxgb_poll_lock_poll(q))
-		return LL_FLUSH_BUSY;
-
-	work_done = process_responses(q, 4);
-	params = QINTR_TIMER_IDX_V(TIMERREG_COUNTER0_X) | QINTR_CNT_EN_V(1);
-	q->next_intr_params = params;
-	val = CIDXINC_V(work_done) | SEINTARM_V(params);
-
-	/* If we don't have access to the new User GTS (T5+), use the old
-	 * doorbell mechanism; otherwise use the new BAR2 mechanism.
-	 */
-	if (unlikely(!q->bar2_addr))
-		t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS_A),
-			     val | INGRESSQID_V((u32)q->cntxt_id));
-	else {
-		writel(val | INGRESSQID_V(q->bar2_qid),
-		       q->bar2_addr + SGE_UDB_GTS);
-		wmb();
-	}
-
-	cxgb_poll_unlock_poll(q);
-	return work_done;
-}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
 /**
  *	napi_rx_handler - the NAPI handler for Rx processing
  *	@napi: the napi instance
@@ -2340,9 +2305,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 	int work_done;
 	u32 val;
 
-	if (!cxgb_poll_lock_napi(q))
-		return budget;
-
 	work_done = process_responses(q, budget);
 	if (likely(work_done < budget)) {
 		int timer_index;
@@ -2382,7 +2344,6 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
 		       q->bar2_addr + SGE_UDB_GTS);
 		wmb();
 	}
-	cxgb_poll_unlock_napi(q);
 	return work_done;
 }