Commit bbb41728 authored by Andrii Nakryiko

Merge branch 'load-acquire/store-release barriers for'

Björn Töpel says:

====================

This two-patch series introduces load-acquire/store-release barriers
for the AF_XDP rings.

For most contemporary architectures, this is more effective than a
SPSC ring based on smp_{r,w,}mb() barriers. More importantly,
load-acquire/store-release semantics make the ring code easier to
follow.

This is effectively the change done in commit 6c43c091
("documentation: Update circular buffer for
load-acquire/store-release"), but for the AF_XDP rings.

Both libbpf and the kernel-side are updated.

Full details are outlined in the commits!

Thanks to the LKMM-folks (Paul/Alan/Will) for helping me out in this
complicated matter!

Changelog

v1[1]->v2:
* Expanded the commit message for patch 1, and included the LKMM
  litmus tests. Hopefully this clears things up. (Daniel)

* Clarified why smp_mb()/smp_load_acquire() is not needed in (A);
  it is a control dependency from a load to a store. (Toke)

[1] https://lore.kernel.org/bpf/20210301104318.263262-1-bjorn.topel@gmail.com/

Thanks,
Björn
====================
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
parents 299194a9 291471dd
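
To make the pattern concrete before diving into the diff: the sketch below is a minimal, self-contained userspace rendition of the single-producer/single-consumer scheme from Documentation/core-api/circular-buffers.rst that the series adopts. It is illustrative only (the spsc_ring type and the ring_push()/ring_pop() names are invented here), and it uses the GCC/Clang __atomic builtins, whose acquire/release orderings correspond to the kernel's smp_load_acquire()/smp_store_release() and to the new libbpf helpers further down.

    #include <stdbool.h>
    #include <stdint.h>

    #define RING_SIZE 128 /* power of two, so masking wraps the indices */

    struct spsc_ring {
            uint32_t producer;            /* written only by the producer */
            uint32_t consumer;            /* written only by the consumer */
            uint64_t data[RING_SIZE];
    };

    /* Producer: the check against ->consumer is (A), the release store of
     * ->producer is (B). The data store is control-dependent on the value
     * loaded in (A), and (B) makes the data visible no later than the new
     * producer index.
     */
    static bool ring_push(struct spsc_ring *r, uint64_t val)
    {
            uint32_t prod = r->producer;
            uint32_t cons = __atomic_load_n(&r->consumer, __ATOMIC_RELAXED);

            if (prod - cons == RING_SIZE)                       /* (A): full */
                    return false;
            r->data[prod & (RING_SIZE - 1)] = val;              /* STORE $data */
            __atomic_store_n(&r->producer, prod + 1, __ATOMIC_RELEASE); /* (B) */
            return true;
    }

    /* Consumer: the acquire load of ->producer is (C), the release store of
     * ->consumer is (D). (C) guarantees the producer's data store is visible,
     * and (D) keeps the data load ordered before the slot is handed back.
     */
    static bool ring_pop(struct spsc_ring *r, uint64_t *val)
    {
            uint32_t cons = r->consumer;
            uint32_t prod = __atomic_load_n(&r->producer, __ATOMIC_ACQUIRE); /* (C) */

            if (prod == cons)                                   /* empty */
                    return false;
            *val = r->data[cons & (RING_SIZE - 1)];             /* LOAD $data */
            __atomic_store_n(&r->consumer, cons + 1, __ATOMIC_RELEASE); /* (D) */
            return true;
    }

On x86 the acquire load and release store compile down to plain loads and stores plus a compiler barrier, which is why the x86 variants of the libbpf macros in the diff below are compiler barriers only.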
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -47,19 +47,18 @@ struct xsk_queue {
         u64 queue_empty_descs;
 };
 
-/* The structure of the shared state of the rings are the same as the
- * ring buffer in kernel/events/ring_buffer.c. For the Rx and completion
- * ring, the kernel is the producer and user space is the consumer. For
- * the Tx and fill rings, the kernel is the consumer and user space is
- * the producer.
+/* The structure of the shared state of the rings are a simple
+ * circular buffer, as outlined in
+ * Documentation/core-api/circular-buffers.rst. For the Rx and
+ * completion ring, the kernel is the producer and user space is the
+ * consumer. For the Tx and fill rings, the kernel is the consumer and
+ * user space is the producer.
  *
  * producer                         consumer
  *
- * if (LOAD ->consumer) {           LOAD ->producer
- *                    (A)           smp_rmb()       (C)
+ * if (LOAD ->consumer) {  (A)      LOAD.acq ->producer  (C)
  *    STORE $data                   LOAD $data
- *    smp_wmb()       (B)           smp_mb()        (D)
- *    STORE ->producer              STORE ->consumer
+ *    STORE.rel ->producer  (B)     STORE.rel ->consumer  (D)
  * }
  *
  * (A) pairs with (D), and (B) pairs with (C).
@@ -78,7 +77,8 @@ struct xsk_queue {
  *
  * (A) is a control dependency that separates the load of ->consumer
  * from the stores of $data. In case ->consumer indicates there is no
- * room in the buffer to store $data we do not. So no barrier is needed.
+ * room in the buffer to store $data we do not. The dependency will
+ * order both of the stores after the loads. So no barrier is needed.
  *
  * (D) protects the load of the data to be observed to happen after the
  * store of the consumer pointer. If we did not have this memory
@@ -227,15 +227,13 @@ static inline u32 xskq_cons_read_desc_batch(struct xsk_queue *q,
 
 static inline void __xskq_cons_release(struct xsk_queue *q)
 {
-        smp_mb(); /* D, matches A */
-        WRITE_ONCE(q->ring->consumer, q->cached_cons);
+        smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
 }
 
 static inline void __xskq_cons_peek(struct xsk_queue *q)
 {
         /* Refresh the local pointer */
-        q->cached_prod = READ_ONCE(q->ring->producer);
-        smp_rmb(); /* C, matches B */
+        q->cached_prod = smp_load_acquire(&q->ring->producer); /* C, matches B */
 }
 
 static inline void xskq_cons_get_entries(struct xsk_queue *q)
@@ -397,9 +395,7 @@ static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
 
 static inline void __xskq_prod_submit(struct xsk_queue *q, u32 idx)
 {
-        smp_wmb(); /* B, matches C */
-        WRITE_ONCE(q->ring->producer, idx);
+        smp_store_release(&q->ring->producer, idx); /* B, matches C */
 }
 
 static inline void xskq_prod_submit(struct xsk_queue *q)
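
Since the comment above argues purely in terms of barrier pairing, it may help to see the (D) relaxation side by side. Both bodies below are lifted from the hunk above (kernel-style code, not compilable standalone; the _old/_new suffixes are added here purely for the comparison, the kernel has only one __xskq_cons_release()). The old smp_mb() ordered every earlier access against every later one, while smp_store_release() only orders earlier loads and stores before this particular store. That weaker guarantee is exactly what the pairing with the producer's ->consumer load in (A) requires, which is the point the cover letter says was checked against the LKMM.

    /* Old consumer release path (D): full barrier plus plain store. */
    static inline void __xskq_cons_release_old(struct xsk_queue *q)
    {
            smp_mb(); /* D, matches A */
            WRITE_ONCE(q->ring->consumer, q->cached_cons);
    }

    /* New consumer release path (D): the ordering is attached to the
     * consumer-pointer store itself.
     */
    static inline void __xskq_cons_release_new(struct xsk_queue *q)
    {
            smp_store_release(&q->ring->consumer, q->cached_cons); /* D, matches A */
    }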
--- a/tools/lib/bpf/libbpf_util.h
+++ b/tools/lib/bpf/libbpf_util.h
@@ -5,6 +5,7 @@
 #define __LIBBPF_LIBBPF_UTIL_H
 
 #include <stdbool.h>
+#include <linux/compiler.h>
 
 #ifdef __cplusplus
 extern "C" {
@@ -15,29 +16,56 @@ extern "C" {
  * application that uses libbpf.
  */
 #if defined(__i386__) || defined(__x86_64__)
-# define libbpf_smp_rmb() asm volatile("" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("" : : : "memory")
-# define libbpf_smp_mb() \
-        asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
-/* Hinders stores to be observed before older loads. */
-# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
+# define libbpf_smp_store_release(p, v)                         \
+        do {                                                    \
+                asm volatile("" : : : "memory");                \
+                WRITE_ONCE(*p, v);                              \
+        } while (0)
+# define libbpf_smp_load_acquire(p)                             \
+        ({                                                      \
+                typeof(*p) ___p1 = READ_ONCE(*p);               \
+                asm volatile("" : : : "memory");                \
+                ___p1;                                          \
+        })
 #elif defined(__aarch64__)
-# define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#elif defined(__arm__)
-/* These are only valid for armv7 and above */
-# define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
-# define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
-# define libbpf_smp_rwmb() libbpf_smp_mb()
-#else
-/* Architecture missing native barrier functions. */
-# define libbpf_smp_rmb() __sync_synchronize()
-# define libbpf_smp_wmb() __sync_synchronize()
-# define libbpf_smp_mb() __sync_synchronize()
-# define libbpf_smp_rwmb() __sync_synchronize()
+# define libbpf_smp_store_release(p, v)                         \
+        asm volatile ("stlr %w1, %0" : "=Q" (*p) : "r" (v) : "memory")
+# define libbpf_smp_load_acquire(p)                             \
+        ({                                                      \
+                typeof(*p) ___p1;                               \
+                asm volatile ("ldar %w0, %1"                    \
+                              : "=r" (___p1) : "Q" (*p) : "memory"); \
+                ___p1;                                          \
+        })
+#elif defined(__riscv)
+# define libbpf_smp_store_release(p, v)                         \
+        do {                                                    \
+                asm volatile ("fence rw,w" : : : "memory");     \
+                WRITE_ONCE(*p, v);                              \
+        } while (0)
+# define libbpf_smp_load_acquire(p)                             \
+        ({                                                      \
+                typeof(*p) ___p1 = READ_ONCE(*p);               \
+                asm volatile ("fence r,rw" : : : "memory");     \
+                ___p1;                                          \
+        })
+#endif
+
+#ifndef libbpf_smp_store_release
+#define libbpf_smp_store_release(p, v)                          \
+        do {                                                    \
+                __sync_synchronize();                           \
+                WRITE_ONCE(*p, v);                              \
+        } while (0)
+#endif
+
+#ifndef libbpf_smp_load_acquire
+#define libbpf_smp_load_acquire(p)                              \
+        ({                                                      \
+                typeof(*p) ___p1 = READ_ONCE(*p);               \
+                __sync_synchronize();                           \
+                ___p1;                                          \
+        })
 #endif
 
 #ifdef __cplusplus
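
A quick usage sketch of the two new helpers, mirroring how xsk.h uses them in the next diff. Everything here is illustrative: ring_hdr and the two wrapper functions are invented, and the sketch assumes it is built with the libbpf/tools include paths, where linux/compiler.h supplies READ_ONCE()/WRITE_ONCE() (applications normally reach these macros only indirectly, through xsk.h).

    #include <linux/types.h>
    #include "libbpf_util.h"    /* internal libbpf header shown above */

    struct ring_hdr {           /* hypothetical mmap'ed ring header */
            __u32 producer;
            __u32 consumer;
    };

    static inline __u32 ring_peek_producer(struct ring_hdr *h)
    {
            /* (C): descriptor reads issued after this call are ordered
             * after the kernel's matching store-release (B).
             */
            return libbpf_smp_load_acquire(&h->producer);
    }

    static inline void ring_publish_consumer(struct ring_hdr *h, __u32 val)
    {
            /* (D): descriptor reads issued before this call complete
             * before the kernel can observe the new consumer index.
             */
            libbpf_smp_store_release(&h->consumer, val);
    }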
--- a/tools/lib/bpf/xsk.h
+++ b/tools/lib/bpf/xsk.h
@@ -96,7 +96,8 @@ static inline __u32 xsk_prod_nb_free(struct xsk_ring_prod *r, __u32 nb)
          * this function. Without this optimization it would have been
          * free_entries = r->cached_prod - r->cached_cons + r->size.
          */
-        r->cached_cons = *r->consumer + r->size;
+        r->cached_cons = libbpf_smp_load_acquire(r->consumer);
+        r->cached_cons += r->size;
 
         return r->cached_cons - r->cached_prod;
 }
@@ -106,7 +107,7 @@ static inline __u32 xsk_cons_nb_avail(struct xsk_ring_cons *r, __u32 nb)
         __u32 entries = r->cached_prod - r->cached_cons;
 
         if (entries == 0) {
-                r->cached_prod = *r->producer;
+                r->cached_prod = libbpf_smp_load_acquire(r->producer);
                 entries = r->cached_prod - r->cached_cons;
         }
 
@@ -129,9 +130,7 @@ static inline void xsk_ring_prod__submit(struct xsk_ring_prod *prod, __u32 nb)
         /* Make sure everything has been written to the ring before indicating
          * this to the kernel by writing the producer pointer.
          */
-        libbpf_smp_wmb();
-
-        *prod->producer += nb;
+        libbpf_smp_store_release(prod->producer, *prod->producer + nb);
 }
 
 static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
@@ -139,11 +138,6 @@ static inline __u32 xsk_ring_cons__peek(struct xsk_ring_cons *cons, __u32 nb, __u32 *idx)
         __u32 entries = xsk_cons_nb_avail(cons, nb);
 
         if (entries > 0) {
-                /* Make sure we do not speculatively read the data before
-                 * we have received the packet buffers from the ring.
-                 */
-                libbpf_smp_rmb();
-
                 *idx = cons->cached_cons;
                 cons->cached_cons += entries;
         }
@@ -161,9 +155,8 @@ static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, __u32 nb)
         /* Make sure data has been read before indicating we are done
          * with the entries by updating the consumer pointer.
          */
-        libbpf_smp_rwmb();
-
-        *cons->consumer += nb;
+        libbpf_smp_store_release(cons->consumer, *cons->consumer + nb);
 }
 
 static inline void *xsk_umem__get_data(void *umem_area, __u64 addr)
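
For applications, the net effect of the xsk.h changes is that the required ordering now lives inside the ring helpers themselves, so a receive loop needs no explicit barriers. A rough sketch (socket setup, error handling, and the fill-ring refill are omitted; process_packet() is a hypothetical handler):

    #include <bpf/xsk.h>

    static void process_packet(void *pkt, __u32 len); /* hypothetical handler */

    static void rx_batch(struct xsk_ring_cons *rx, void *umem_area)
    {
            __u32 idx = 0, i;
            __u32 rcvd = xsk_ring_cons__peek(rx, 64, &idx); /* load-acquire of
                                                             * ->producer as needed */

            for (i = 0; i < rcvd; i++) {
                    const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + i);
                    void *pkt = xsk_umem__get_data(umem_area, desc->addr);

                    process_packet(pkt, desc->len);
            }

            if (rcvd)
                    xsk_ring_cons__release(rx, rcvd); /* store-release of ->consumer */
    }

The transmit/fill side is symmetric: xsk_ring_prod__submit() now publishes the producer index with libbpf_smp_store_release(), as shown in the hunk above.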