Commit 074975d0 authored by Eric Dumazet, committed by David S. Miller

bnx2x: Fix busy_poll vs netpoll

Commit 9a2620c8 ("bnx2x: prevent WARN during driver unload")
switched the napi/busy_lock locking mechanism from spin_lock() to
spin_lock_bh(), breaking interoperability with netconsole, since netpoll
disables interrupts before calling our napi mechanism.

This switches the driver to atomic operations instead of the
spinlock mechanism previously employed.

Based on an initial patch from Yuval Mintz & Ariel Elior.

I basically added softirq starvation avoidance and a mixture
of atomic operations, plain writes, and barriers.

Note that this slightly reduces the overhead for this driver when no
busy_poll sockets are in use.

Fixes: 9a2620c8 ("bnx2x: prevent WARN during driver unload")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 48b63776
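
To make the new scheme easier to follow before reading the diff, here is a
minimal userspace model of the lock-free ownership protocol the patch adopts,
written with C11 atomics standing in for the kernel's cmpxchg(), set_bit(),
clear_bit() and barriers. The state names mirror the driver's enum, but the
whole sketch is illustrative only, not the driver code itself:

/* model.c - userspace sketch of the bnx2x busy_poll_state protocol */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define STATE_NAPI     (1UL << 0)   /* NAPI handler owns the queue */
#define STATE_NAPI_REQ (1UL << 1)   /* NAPI would like to own the queue */
#define STATE_POLL     (1UL << 2)   /* busy_poll owns the queue */

static _Atomic unsigned long busy_poll_state;  /* starts at 0 == idle */

/* device poll routine tries to take ownership of the queue */
static bool lock_napi(void)
{
	unsigned long old = atomic_load(&busy_poll_state);

	for (;;) {
		if (old & STATE_POLL) {
			/* leave a request so the poller hands the queue
			 * over instead of starving us (idempotent)
			 */
			atomic_fetch_or(&busy_poll_state, STATE_NAPI_REQ);
			return false;
		}
		/* queue is free (perhaps with a stale request bit):
		 * claim it in one atomic step, like the kernel cmpxchg()
		 */
		if (atomic_compare_exchange_weak(&busy_poll_state, &old,
						 STATE_NAPI))
			return true;
		/* CAS failure reloaded 'old'; retry with the new value */
	}
}

static void unlock_napi(void)
{
	/* release store == the smp_wmb() + plain write in the patch */
	atomic_store_explicit(&busy_poll_state, 0, memory_order_release);
}

/* busy_poll only ever takes a completely idle queue */
static bool lock_poll(void)
{
	unsigned long idle = 0;

	return atomic_compare_exchange_strong(&busy_poll_state, &idle,
					      STATE_POLL);
}

static void unlock_poll(void)
{
	/* clear only our bit; a pending STATE_NAPI_REQ survives */
	atomic_fetch_and(&busy_poll_state, ~STATE_POLL);
}

int main(void)
{
	lock_poll();                                    /* poller wins    */
	printf("napi while polled: %d\n", lock_napi()); /* 0: backs off   */
	unlock_poll();                                  /* request stays  */
	printf("napi after poll:   %d\n", lock_napi()); /* 1: takes over  */
	unlock_napi();
	return 0;
}

The key design point survives the translation: busy_poll may only grab a
completely idle queue, while NAPI records a request bit when it loses the
race, so repeated bnx2x_fp_lock_poll() calls cannot starve the softirq
handler.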
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -531,20 +531,8 @@ struct bnx2x_fastpath {
 	struct napi_struct	napi;

 #ifdef CONFIG_NET_RX_BUSY_POLL
-	unsigned int state;
-#define BNX2X_FP_STATE_IDLE		      0
-#define BNX2X_FP_STATE_NAPI		(1 << 0)    /* NAPI owns this FP */
-#define BNX2X_FP_STATE_POLL		(1 << 1)    /* poll owns this FP */
-#define BNX2X_FP_STATE_DISABLED		(1 << 2)
-#define BNX2X_FP_STATE_NAPI_YIELD	(1 << 3)    /* NAPI yielded this FP */
-#define BNX2X_FP_STATE_POLL_YIELD	(1 << 4)    /* poll yielded this FP */
-#define BNX2X_FP_OWNED	(BNX2X_FP_STATE_NAPI | BNX2X_FP_STATE_POLL)
-#define BNX2X_FP_YIELD	(BNX2X_FP_STATE_NAPI_YIELD | BNX2X_FP_STATE_POLL_YIELD)
-#define BNX2X_FP_LOCKED	(BNX2X_FP_OWNED | BNX2X_FP_STATE_DISABLED)
-#define BNX2X_FP_USER_PEND (BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_POLL_YIELD)
-	/* protect state */
-	spinlock_t lock;
-#endif /* CONFIG_NET_RX_BUSY_POLL */
+	unsigned long		busy_poll_state;
+#endif

 	union host_hc_status_block	status_blk;
 	/* chip independent shortcuts into sb structure */
@@ -619,104 +607,83 @@ struct bnx2x_fastpath {
 #define bnx2x_fp_qstats(bp, fp)	(&((bp)->fp_stats[(fp)->index].eth_q_stats))

 #ifdef CONFIG_NET_RX_BUSY_POLL
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+
+enum bnx2x_fp_state {
+	BNX2X_STATE_FP_NAPI	= BIT(0), /* NAPI handler owns the queue */
+
+	BNX2X_STATE_FP_NAPI_REQ_BIT = 1, /* NAPI would like to own the queue */
+	BNX2X_STATE_FP_NAPI_REQ = BIT(1),
+
+	BNX2X_STATE_FP_POLL_BIT = 2,
+	BNX2X_STATE_FP_POLL     = BIT(2), /* busy_poll owns the queue */
+
+	BNX2X_STATE_FP_DISABLE_BIT = 3, /* queue is dismantled */
+};
+
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
-	spin_lock_init(&fp->lock);
-	fp->state = BNX2X_FP_STATE_IDLE;
+	WRITE_ONCE(fp->busy_poll_state, 0);
 }

 /* called from the device poll routine to get ownership of a FP */
 static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_LOCKED) {
-		WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-		fp->state |= BNX2X_FP_STATE_NAPI_YIELD;
-		rc = false;
-	} else {
-		/* we don't care if someone yielded */
-		fp->state = BNX2X_FP_STATE_NAPI;
+	unsigned long prev, old = READ_ONCE(fp->busy_poll_state);
+
+	while (1) {
+		switch (old) {
+		case BNX2X_STATE_FP_POLL:
+			/* make sure bnx2x_fp_lock_poll() wont starve us */
+			set_bit(BNX2X_STATE_FP_NAPI_REQ_BIT,
+				&fp->busy_poll_state);
+			/* fallthrough */
+		case BNX2X_STATE_FP_POLL | BNX2X_STATE_FP_NAPI_REQ:
+			return false;
+		default:
+			break;
+		}
+		prev = cmpxchg(&fp->busy_poll_state, old, BNX2X_STATE_FP_NAPI);
+		if (unlikely(prev != old)) {
+			old = prev;
+			continue;
+		}
+		return true;
 	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
 }

-/* returns true is someone tried to get the FP while napi had it */
-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state &
-		(BNX2X_FP_STATE_POLL | BNX2X_FP_STATE_NAPI_YIELD));
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_wmb();
+	fp->busy_poll_state = 0;
 }

 /* called from bnx2x_low_latency_poll() */
 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if ((fp->state & BNX2X_FP_LOCKED)) {
-		fp->state |= BNX2X_FP_STATE_POLL_YIELD;
-		rc = false;
-	} else {
-		/* preserve yield marks */
-		fp->state |= BNX2X_FP_STATE_POLL;
-	}
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	return cmpxchg(&fp->busy_poll_state, 0, BNX2X_STATE_FP_POLL) == 0;
 }

-/* returns true if someone tried to get the FP while it was locked */
-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	bool rc = false;
-
-	spin_lock_bh(&fp->lock);
-	WARN_ON(fp->state & BNX2X_FP_STATE_NAPI);
-
-	if (fp->state & BNX2X_FP_STATE_POLL_YIELD)
-		rc = true;
-
-	/* state ==> idle, unless currently disabled */
-	fp->state &= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-	return rc;
+	smp_mb__before_atomic();
+	clear_bit(BNX2X_STATE_FP_POLL_BIT, &fp->busy_poll_state);
 }

-/* true if a socket is polling, even if it did not get the lock */
+/* true if a socket is polling */
 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
 {
-	WARN_ON(!(fp->state & BNX2X_FP_OWNED));
-	return fp->state & BNX2X_FP_USER_PEND;
+	return READ_ONCE(fp->busy_poll_state) & BNX2X_STATE_FP_POLL;
 }

 /* false if fp is currently owned */
 static inline bool bnx2x_fp_ll_disable(struct bnx2x_fastpath *fp)
 {
-	int rc = true;
-
-	spin_lock_bh(&fp->lock);
-	if (fp->state & BNX2X_FP_OWNED)
-		rc = false;
-	fp->state |= BNX2X_FP_STATE_DISABLED;
-	spin_unlock_bh(&fp->lock);
-
-	return rc;
+	set_bit(BNX2X_STATE_FP_DISABLE_BIT, &fp->busy_poll_state);
+	return !bnx2x_fp_ll_polling(fp);
 }
 #else
-static inline void bnx2x_fp_init_lock(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_busy_poll_init(struct bnx2x_fastpath *fp)
 {
 }
@@ -725,9 +692,8 @@ static inline bool bnx2x_fp_lock_napi(struct bnx2x_fastpath *fp)
 	return true;
 }

-static inline bool bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_napi(struct bnx2x_fastpath *fp)
 {
-	return false;
 }

 static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
@@ -735,9 +701,8 @@ static inline bool bnx2x_fp_lock_poll(struct bnx2x_fastpath *fp)
 	return false;
 }

-static inline bool bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
+static inline void bnx2x_fp_unlock_poll(struct bnx2x_fastpath *fp)
 {
-	return false;
 }

 static inline bool bnx2x_fp_ll_polling(struct bnx2x_fastpath *fp)
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1849,7 +1849,7 @@ static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
 	int i;

 	for_each_rx_queue_cnic(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -1859,7 +1859,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
 	int i;

 	for_each_eth_queue(bp, i) {
-		bnx2x_fp_init_lock(&bp->fp[i]);
+		bnx2x_fp_busy_poll_init(&bp->fp[i]);
 		napi_enable(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -3191,9 +3191,10 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
 			}
 		}

+		bnx2x_fp_unlock_napi(fp);
+
 		/* Fall out from the NAPI loop if needed */
-		if (!bnx2x_fp_unlock_napi(fp) &&
-		    !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
+		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {

 			/* No need to update SB for FCoE L2 ring as long as
 			 * it's connected to the default SB and the SB