Commit d7012f05 authored by Magnus Karlsson, committed by Alexei Starovoitov

xsk: Consolidate to one single cached producer pointer

Currently, the xsk ring code has two cached producer pointers:
prod_head and prod_tail. This patch consolidates them into a single
one called cached_prod to make the code simpler and easier to
maintain. This brings the kernel in line with the user-space part of
the code in libbpf, which only uses a single cached pointer.
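
To make the scheme concrete, here is a minimal user-space sketch with
hypothetical, simplified types (not the kernel's or libbpf's actual
definitions): a ring with a single cached producer index, where
everything between the shared ring->producer and the local cached_prod
has been written locally but not yet published to the consumer.

  /* Hypothetical, simplified types for illustration only. */
  struct ring {
          unsigned int producer;    /* shared with the consumer */
          unsigned int consumer;    /* shared with the producer */
  };

  struct queue {
          unsigned int cached_prod; /* local producer index */
          unsigned int cons_tail;   /* local snapshot of ring->consumer */
          unsigned int nentries;    /* ring size, a power of two */
          unsigned int ring_mask;   /* nentries - 1 */
          unsigned long long *desc; /* descriptor array */
          struct ring *ring;
  };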

The Rx path only uses the two top-level functions
xskq_produce_batch_desc and xskq_produce_flush_desc; both use
prod_head and never prod_tail, so they can simply be moved over to
cached_prod.
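
As a rough sketch of that shape, reusing the hypothetical types above,
with a C11-style release store standing in for the kernel's
smp_wmb()/WRITE_ONCE() pair:

  static int produce_batch_desc(struct queue *q, unsigned long long addr)
  {
          if (nb_free(q, 1) == 0)  /* nb_free() is sketched further below */
                  return -1;       /* -ENOSPC in the kernel */
          q->desc[q->cached_prod++ & q->ring_mask] = addr;
          return 0;
  }

  static void flush_desc(struct queue *q)
  {
          /* Publish all locally written entries in one store; release
           * ordering plays the role of smp_wmb() before the write. */
          __atomic_store_n(&q->ring->producer, q->cached_prod,
                           __ATOMIC_RELEASE);
  }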

The Tx XDP_DRV path uses xskq_produce_addr_lazy and
xskq_produce_flush_addr_n and unnecessarily operates on both prod_tail
and prod_head, so move them over to just use cached_prod by skipping
the intermediate step of updating prod_tail.
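
The produce side is the same cached_prod increment shown in the Rx
sketch above; the flush, sketched under the same assumptions, then
advances the global producer directly:

  static void flush_addr_n(struct queue *q, unsigned int nb_entries)
  {
          /* Only the producer thread writes ring->producer, so it can
           * read its own last published value and bump it; no local
           * prod_tail copy is needed. */
          __atomic_store_n(&q->ring->producer,
                           q->ring->producer + nb_entries,
                           __ATOMIC_RELEASE);
  }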

The Tx path in XDP_SKB mode uses xskq_reserve_addr and
xskq_produce_addr. They currently use both cached pointers, but
xskq_produce_addr can operate directly on the global producer pointer,
since that has to be updated anyway; this eliminates the use of both
cached pointers in that function. We can also remove the xskq_nb_free
call in xskq_produce_addr, since the check is already performed in
xskq_reserve_addr; there is no need to do it twice.
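
Sketched with the same hypothetical types: because the reserve step has
already checked for space, the produce step can index and publish
through the global producer alone.

  static int reserve_addr(struct queue *q)
  {
          if (nb_free(q, 1) == 0)  /* nb_free() is sketched below */
                  return -1;
          q->cached_prod++;        /* claim a slot locally */
          return 0;
  }

  static int produce_addr(struct queue *q, unsigned long long addr)
  {
          unsigned int idx = q->ring->producer;

          /* No free-space check: reserve_addr() already performed it. */
          q->desc[idx++ & q->ring_mask] = addr;
          /* Release ordering stands in for smp_wmb() + WRITE_ONCE(). */
          __atomic_store_n(&q->ring->producer, idx, __ATOMIC_RELEASE);
          return 0;
  }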

When there is only one cached producer pointer, we can also simplify
xskq_nb_free by removing one argument.
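
A sketch of the simplified check, under the same assumptions, with an
acquire load standing in for the kernel's READ_ONCE():

  static unsigned int nb_free(struct queue *q, unsigned int dcnt)
  {
          unsigned int free_entries =
                  q->nentries - (q->cached_prod - q->cons_tail);

          if (free_entries >= dcnt)
                  return free_entries;

          /* Refresh the cached consumer pointer and recompute. */
          q->cons_tail = __atomic_load_n(&q->ring->consumer,
                                         __ATOMIC_ACQUIRE);
          return q->nentries - (q->cached_prod - q->cons_tail);
  }
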
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-4-git-send-email-magnus.karlsson@intel.com
parent 11cc2d21
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -35,8 +35,7 @@ struct xsk_queue {
 	u64 size;
 	u32 ring_mask;
 	u32 nentries;
-	u32 prod_head;
-	u32 prod_tail;
+	u32 cached_prod;
 	u32 cons_head;
 	u32 cons_tail;
 	struct xdp_ring *ring;
@@ -94,39 +93,39 @@ static inline u64 xskq_nb_invalid_descs(struct xsk_queue *q)
 
 static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
 {
-	u32 entries = q->prod_tail - q->cons_tail;
+	u32 entries = q->cached_prod - q->cons_tail;
 
 	if (entries == 0) {
 		/* Refresh the local pointer */
-		q->prod_tail = READ_ONCE(q->ring->producer);
-		entries = q->prod_tail - q->cons_tail;
+		q->cached_prod = READ_ONCE(q->ring->producer);
+		entries = q->cached_prod - q->cons_tail;
 	}
 
 	return (entries > dcnt) ? dcnt : entries;
 }
 
-static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
+static inline u32 xskq_nb_free(struct xsk_queue *q, u32 dcnt)
 {
-	u32 free_entries = q->nentries - (producer - q->cons_tail);
+	u32 free_entries = q->nentries - (q->cached_prod - q->cons_tail);
 
 	if (free_entries >= dcnt)
 		return free_entries;
 
 	/* Refresh the local tail pointer */
 	q->cons_tail = READ_ONCE(q->ring->consumer);
-	return q->nentries - (producer - q->cons_tail);
+	return q->nentries - (q->cached_prod - q->cons_tail);
 }
 
 static inline bool xskq_has_addrs(struct xsk_queue *q, u32 cnt)
 {
-	u32 entries = q->prod_tail - q->cons_tail;
+	u32 entries = q->cached_prod - q->cons_tail;
 
 	if (entries >= cnt)
 		return true;
 
 	/* Refresh the local pointer. */
-	q->prod_tail = READ_ONCE(q->ring->producer);
-	entries = q->prod_tail - q->cons_tail;
+	q->cached_prod = READ_ONCE(q->ring->producer);
+	entries = q->cached_prod - q->cons_tail;
 
 	return entries >= cnt;
 }
@@ -220,17 +219,15 @@ static inline void xskq_discard_addr(struct xsk_queue *q)
 
 static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
 {
 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
-
-	if (xskq_nb_free(q, q->prod_tail, 1) == 0)
-		return -ENOSPC;
+	unsigned int idx = q->ring->producer;
 
 	/* A, matches D */
-	ring->desc[q->prod_tail++ & q->ring_mask] = addr;
+	ring->desc[idx++ & q->ring_mask] = addr;
 
 	/* Order producer and data */
 	smp_wmb(); /* B, matches C */
 
-	WRITE_ONCE(q->ring->producer, q->prod_tail);
+	WRITE_ONCE(q->ring->producer, idx);
 	return 0;
 }
@@ -238,11 +235,11 @@ static inline int xskq_produce_addr_lazy(struct xsk_queue *q, u64 addr)
 {
 	struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
 
-	if (xskq_nb_free(q, q->prod_head, 1) == 0)
+	if (xskq_nb_free(q, 1) == 0)
 		return -ENOSPC;
 
 	/* A, matches D */
-	ring->desc[q->prod_head++ & q->ring_mask] = addr;
+	ring->desc[q->cached_prod++ & q->ring_mask] = addr;
 	return 0;
 }
 
@@ -252,17 +249,16 @@ static inline void xskq_produce_flush_addr_n(struct xsk_queue *q,
 	/* Order producer and data */
 	smp_wmb(); /* B, matches C */
 
-	q->prod_tail += nb_entries;
-	WRITE_ONCE(q->ring->producer, q->prod_tail);
+	WRITE_ONCE(q->ring->producer, q->ring->producer + nb_entries);
 }
 
 static inline int xskq_reserve_addr(struct xsk_queue *q)
 {
-	if (xskq_nb_free(q, q->prod_head, 1) == 0)
+	if (xskq_nb_free(q, 1) == 0)
 		return -ENOSPC;
 
 	/* A, matches D */
-	q->prod_head++;
+	q->cached_prod++;
 	return 0;
 }
 
@@ -340,11 +336,11 @@ static inline int xskq_produce_batch_desc(struct xsk_queue *q,
 	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 	unsigned int idx;
 
-	if (xskq_nb_free(q, q->prod_head, 1) == 0)
+	if (xskq_nb_free(q, 1) == 0)
 		return -ENOSPC;
 
 	/* A, matches D */
-	idx = (q->prod_head++) & q->ring_mask;
+	idx = q->cached_prod++ & q->ring_mask;
 
 	ring->desc[idx].addr = addr;
 	ring->desc[idx].len = len;
@@ -356,8 +352,7 @@ static inline void xskq_produce_flush_desc(struct xsk_queue *q)
 	/* Order producer and data */
 	smp_wmb(); /* B, matches C */
 
-	q->prod_tail = q->prod_head;
-	WRITE_ONCE(q->ring->producer, q->prod_tail);
+	WRITE_ONCE(q->ring->producer, q->cached_prod);
 }
 
 static inline bool xskq_full_desc(struct xsk_queue *q)