Commit 99b29a49 authored by Albert Huang, committed by Daniel Borkmann

xsk: Avoid starving the xsk further down the list

In the previous implementation, when multiple xsk sockets were
associated with a single xsk_buff_pool, the xsk socket at the front of
the xsk_tx_list could keep sending tx descriptors while the xsk sockets
further down the list were starved. This could result in issues such as
the inability to transmit packets, increased latency, and jitter. To
address this problem, we introduce a new variable called tx_budget_spent,
which limits each xsk to sending at most MAX_PER_SOCKET_BUDGET tx
descriptors before the remaining xsk sockets on the list get an equitable
opportunity to send theirs. The value of MAX_PER_SOCKET_BUDGET is set to 32.
Signed-off-by: Albert Huang <huangjie.albert@bytedance.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
Link: https://lore.kernel.org/bpf/20231023125732.82261-1-huangjie.albert@bytedance.com
parent dedd6c89
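
As a rough illustration of the scheme (not the kernel code itself; the real
change is in xsk_tx_peek_desc() in the diff below), the following
self-contained C sketch uses made-up sim_sock/pick_next names to show the
idea: each socket may consume at most MAX_PER_SOCKET_BUDGET descriptors per
scan of the list, and once sockets have only been skipped because their
budget was spent, all budgets are reset and the scan restarts.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PER_SOCKET_BUDGET 32

struct sim_sock {
	unsigned int pending;      /* descriptors waiting in this socket's tx ring */
	unsigned int budget_spent; /* stand-in for xs->tx_budget_spent */
};

/* Pick one descriptor across all sockets, skipping any socket that has
 * already spent its budget; when the only sockets left with work were
 * skipped for that reason, reset all budgets and scan again.
 */
static bool pick_next(struct sim_sock *socks, int n, int *owner)
{
	bool budget_exhausted = false;
	int i;

again:
	for (i = 0; i < n; i++) {
		if (socks[i].budget_spent >= MAX_PER_SOCKET_BUDGET) {
			budget_exhausted = true;
			continue;
		}
		if (!socks[i].pending)
			continue;

		socks[i].pending--;
		socks[i].budget_spent++;
		*owner = i;
		return true;
	}

	if (budget_exhausted) {
		for (i = 0; i < n; i++)
			socks[i].budget_spent = 0;
		budget_exhausted = false;
		goto again;
	}
	return false;
}

int main(void)
{
	struct sim_sock socks[2] = { { .pending = 1000 }, { .pending = 1000 } };
	int owner, sent[2] = { 0, 0 };

	/* Drain 128 descriptors: both sockets are served (64/64) instead of
	 * the first socket on the list hogging all of them.
	 */
	for (int i = 0; i < 128 && pick_next(socks, 2, &owner); i++)
		sent[owner]++;

	printf("sock0=%d sock1=%d\n", sent[0], sent[1]);
	return 0;
}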
@@ -63,6 +63,13 @@ struct xdp_sock {
 	struct xsk_queue *tx ____cacheline_aligned_in_smp;
 	struct list_head tx_list;
+	/* record the number of tx descriptors sent by this xsk and
+	 * when it exceeds MAX_PER_SOCKET_BUDGET, an opportunity needs
+	 * to be given to other xsks for sending tx descriptors, thereby
+	 * preventing other XSKs from being starved.
+	 */
+	u32 tx_budget_spent;
+
 	/* Protects generic receive. */
 	spinlock_t rx_lock;
...
@@ -33,6 +33,7 @@
 #include "xsk.h"

 #define TX_BATCH_SIZE 32
+#define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE)

 static DEFINE_PER_CPU(struct list_head, xskmap_flush_list);
@@ -423,16 +424,25 @@ EXPORT_SYMBOL(xsk_tx_release);
 bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
 {
+	bool budget_exhausted = false;
 	struct xdp_sock *xs;

 	rcu_read_lock();
+again:
 	list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list) {
+		if (xs->tx_budget_spent >= MAX_PER_SOCKET_BUDGET) {
+			budget_exhausted = true;
+			continue;
+		}
+
 		if (!xskq_cons_peek_desc(xs->tx, desc, pool)) {
 			if (xskq_has_descs(xs->tx))
 				xskq_cons_release(xs->tx);
 			continue;
 		}

+		xs->tx_budget_spent++;
+
 		/* This is the backpressure mechanism for the Tx path.
 		 * Reserve space in the completion queue and only proceed
 		 * if there is space in it. This avoids having to implement
@@ -446,6 +456,14 @@ bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc)
 		return true;
 	}

+	if (budget_exhausted) {
+		list_for_each_entry_rcu(xs, &pool->xsk_tx_list, tx_list)
+			xs->tx_budget_spent = 0;
+
+		budget_exhausted = false;
+		goto again;
+	}
+
 out:
 	rcu_read_unlock();
 	return false;
...
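
For context on where this matters, xsk_tx_peek_desc() is the helper that
zero-copy drivers call from their TX path to pull the next descriptor out of
a shared pool, so the fairness above applies at every such call site. Below
is a rough, non-compilable sketch of that usage pattern: example_ring,
hw_ring_has_space() and hw_ring_post() are hypothetical driver-specific
placeholders, while xsk_tx_peek_desc(), xsk_buff_raw_get_dma() and
xsk_tx_release() are existing helpers.

/* Hypothetical zero-copy driver TX loop (driver specifics stubbed out). */
static int example_xsk_xmit(struct example_ring *ring,
			    struct xsk_buff_pool *pool, int budget)
{
	struct xdp_desc desc;
	int sent = 0;

	while (sent < budget && hw_ring_has_space(ring)) {
		/* With this patch, descriptors are handed out fairly across
		 * all sockets sharing the pool, not just the list head.
		 */
		if (!xsk_tx_peek_desc(pool, &desc))
			break;

		/* Translate the umem offset to a DMA address and post it
		 * to the hardware ring (driver-specific).
		 */
		hw_ring_post(ring, xsk_buff_raw_get_dma(pool, desc.addr), desc.len);
		sent++;
	}

	if (sent)
		/* Let user space reuse the consumed tx ring entries. */
		xsk_tx_release(pool);

	return sent;
}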