Commit e7ef085d authored by David S. Miller

Merge branch 'napi_budget_zero'

Eric W. Biederman says:

====================
Don't receive packets when the napi budget == 0

After reading through all 120 drivers supporting netpoll I have found 16
more that process at least one received packet when the napi budget == 0.

Processing more packets than your budget has always been a bug, but
we haven't cared before, so it looks like these drivers slipped through
and need fixes.

As netpoll will shortly be using a budget of 0 to get tx queue
processing without any rx queue processing, we now care.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1c79a5a8 75363a46
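For context before the per-driver hunks, here is a minimal, hypothetical sketch of the NAPI poll pattern this series enforces. The foo_* names are placeholders and not part of any driver touched in this merge: tx completion work may still run when netpoll passes a budget of 0, but no rx packet may be consumed, and polling only stops when the handler stays under budget.

/* Hypothetical NAPI poll handler; the foo_* helpers are illustrative only. */
static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_priv *priv = container_of(napi, struct foo_priv, napi);
	int work_done = 0;

	/* Tx completions are always allowed, even when budget == 0. */
	foo_clean_tx_ring(priv);

	/* With a zero budget (e.g. from netpoll) no rx packet may be consumed. */
	if (budget <= 0)
		return 0;

	work_done = foo_clean_rx_ring(priv, budget);

	/* Only stop polling and re-enable interrupts when under budget. */
	if (work_done < budget) {
		napi_complete(napi);
		foo_enable_interrupts(priv);
	}

	return work_done;
}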
@@ -720,6 +720,9 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
 	int rx_pkt_limit = budget;
 	unsigned long flags;
 
+	if (rx_pkt_limit <= 0)
+		goto rx_not_empty;
+
 	do{
 		/* process receive packets until we use the quota*/
 		/* If we own the next entry, it's a new packet. Send it up. */
......
@@ -872,6 +872,8 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
 	if (unlikely(bp->panic))
 		return 0;
 #endif
+	if (budget <= 0)
+		return rx_pkt;
 
 	bd_cons = fp->rx_bd_cons;
 	bd_prod = fp->rx_bd_prod;
......
@@ -1086,12 +1086,13 @@ static int enic_poll(struct napi_struct *napi, int budget)
 	unsigned int intr = enic_legacy_io_intr();
 	unsigned int rq_work_to_do = budget;
 	unsigned int wq_work_to_do = -1; /* no limit */
-	unsigned int work_done, rq_work_done, wq_work_done;
+	unsigned int work_done, rq_work_done = 0, wq_work_done;
 	int err;
 
 	/* Service RQ (first) and WQ
 	 */
 
-	rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
-		rq_work_to_do, enic_rq_service, NULL);
+	if (budget > 0)
+		rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
+			rq_work_to_do, enic_rq_service, NULL);
@@ -1141,12 +1142,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
 	unsigned int cq = enic_cq_rq(enic, rq);
 	unsigned int intr = enic_msix_rq_intr(enic, rq);
 	unsigned int work_to_do = budget;
-	unsigned int work_done;
+	unsigned int work_done = 0;
 	int err;
 
 	/* Service RQ
 	 */
 
-	work_done = vnic_cq_service(&enic->cq[cq],
-		work_to_do, enic_rq_service, NULL);
+	if (budget > 0)
+		work_done = vnic_cq_service(&enic->cq[cq],
+			work_to_do, enic_rq_service, NULL);
......
@@ -91,6 +91,9 @@ static int fs_enet_rx_napi(struct napi_struct *napi, int budget)
 	u16 pkt_len, sc;
 	int curidx;
 
+	if (budget <= 0)
+		return received;
+
 	/*
 	 * First, grab all of the stats for the incoming packet.
 	 * These get messed up if we get called due to a busy condition.
......
@@ -1072,7 +1072,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 	unsigned long lpar_rc;
 
 restart_poll:
-	do {
+	while (frames_processed < budget) {
 		if (!ibmveth_rxq_pending_buffer(adapter))
 			break;
@@ -1121,7 +1121,7 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
 			netdev->stats.rx_bytes += length;
 			frames_processed++;
 		}
-	} while (frames_processed < budget);
+	}
 
 	ibmveth_replenish_task(adapter);
......
@@ -1302,6 +1302,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 	u8 rx_ptype;
 	u64 qword;
 
+	if (budget <= 0)
+		return 0;
+
 	rx_desc = I40E_RX_DESC(rx_ring, i);
 	qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
 	rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
......
@@ -6946,7 +6946,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 	unsigned int total_bytes = 0, total_packets = 0;
 	u16 cleaned_count = igb_desc_unused(rx_ring);
 
-	do {
+	while (likely(total_packets < budget)) {
 		union e1000_adv_rx_desc *rx_desc;
 
 		/* return some buffers to hardware, one at a time is too slow */
@@ -6998,7 +6998,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
 
 		/* update budget accounting */
 		total_packets++;
-	} while (likely(total_packets < budget));
+	}
 
 	/* place incomplete frames back on ring for completion */
 	rx_ring->skb = skb;
......
@@ -2076,7 +2076,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 #endif /* IXGBE_FCOE */
 	u16 cleaned_count = ixgbe_desc_unused(rx_ring);
 
-	do {
+	while (likely(total_rx_packets < budget)) {
 		union ixgbe_adv_rx_desc *rx_desc;
 		struct sk_buff *skb;
@@ -2151,7 +2151,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 
 		/* update budget accounting */
 		total_rx_packets++;
-	} while (likely(total_rx_packets < budget));
+	}
 
 	u64_stats_update_begin(&rx_ring->syncp);
 	rx_ring->stats.packets += total_rx_packets;
......
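The ibmveth, igb and ixgbe hunks above are all the same transformation: a bottom-tested do/while loop only checks the budget after it has already handled a packet, so a budget of 0 still consumes one frame, whereas a top-tested while loop never enters the body. A schematic comparison, where process_one_rx() is a placeholder and not a helper in any of these drivers:

/* Bottom-tested: the body runs at least once, even when budget == 0. */
do {
	process_one_rx();	/* placeholder for the per-packet work */
	total_packets++;
} while (total_packets < budget);

/* Top-tested: a zero budget never enters the body. */
while (total_packets < budget) {
	process_one_rx();
	total_packets++;
}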
@@ -2735,6 +2735,9 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
 	unsigned int total_bytes[2] = { 0 };
 	unsigned int total_packets[2] = { 0 };
 
+	if (to_do <= 0)
+		return work_done;
+
 	rmb();
 	do {
 		struct sky2_port *sky2;
......
@@ -661,6 +661,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int budget)
 	if (!priv->port_up)
 		return 0;
 
+	if (budget <= 0)
+		return polled;
+
 	/* We assume a 1:1 mapping between CQEs and Rx descriptors, so Rx
 	 * descriptor offset can be deduced from the CQE index instead of
 	 * reading 'cqe->index' */
......
@@ -2914,6 +2914,9 @@ static int rx_intr_handler(struct ring_info *ring_data, int budget)
 	struct RxD1 *rxdp1;
 	struct RxD3 *rxdp3;
 
+	if (budget <= 0)
+		return napi_pkts;
+
 	get_info = ring_data->rx_curr_get_info;
 	get_block = get_info.block_index;
 	memcpy(&put_info, &ring_data->rx_curr_put_info, sizeof(put_info));
......
@@ -368,6 +368,9 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 	vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
 		ring->ndev->name, __func__, __LINE__);
 
+	if (ring->budget <= 0)
+		goto out;
+
 	do {
 		prefetch((char *)dtr + L1_CACHE_BYTES);
 		rx_priv = vxge_hw_ring_rxd_private_get(dtr);
@@ -525,6 +528,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 	if (first_dtr)
 		vxge_hw_ring_rxd_post_post_wmb(ringh, first_dtr);
 
+out:
 	vxge_debug_entryexit(VXGE_TRACE,
 		"%s:%d Exiting...",
 		__func__, __LINE__);
......
@@ -1955,6 +1955,9 @@ static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
 	int tx_descs = 0;
 	int spent = 0;
 
+	if (quota <= 0)
+		return spent;
+
 	read_ptr = channel->eventq_read_ptr;
 
 	for (;;) {
......
@@ -1248,6 +1248,9 @@ int efx_farch_ev_process(struct efx_channel *channel, int budget)
 	int tx_packets = 0;
 	int spent = 0;
 
+	if (budget <= 0)
+		return spent;
+
 	read_ptr = channel->eventq_read_ptr;
 
 	for (;;) {
......
@@ -659,6 +659,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	struct info_mpipe *info_mpipe =
 		container_of(napi, struct info_mpipe, napi);
 
+	if (budget <= 0)
+		goto done;
+
 	instance = info_mpipe->instance;
 	while ((n = gxio_mpipe_iqueue_try_peek(
 			&info_mpipe->iqueue,
......
@@ -831,6 +831,9 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
 	unsigned int work = 0;
 
+	if (budget <= 0)
+		goto done;
+
 	while (priv->active) {
 		int index = qup->__packet_receive_read;
 		if (index == qsp->__packet_receive_queue.__packet_write)
......
@@ -1645,6 +1645,9 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
 	int received = 0, handled;
 	u32 status;
 
+	if (budget <= 0)
+		return received;
+
 	spin_lock(&lp->rx_lock);
 	status = tc_readl(&tr->Int_Src);
 	do {
......