Commit 2c5935f1 authored by Magnus Karlsson, committed by Alexei Starovoitov

libbpf: optimize barrier for XDP socket rings

The full memory barrier in the XDP socket rings on the consumer side
between the load of the data and the store of the consumer ring is
there to protect the store from being executed before the load of the
data. If this were allowed to happen, the producer might overwrite the
data field with a new entry before the consumer has had a chance to
read it.

On x86, stores are guaranteed not to be reordered with older loads, so
it does not need a full memory barrier here. A compile-time barrier
would be enough. This patch introduces a new primitive in
libbpf_util.h that implements a new barrier type (libbpf_smp_rwmb)
which hinders stores from being reordered with older loads. It is then used in
the XDP socket ring access code in libbpf to improve performance.
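
To make the ordering requirement concrete, here is a minimal consumer-side sketch (illustrative only; the ring layout and field names below are assumptions, not libbpf's actual definitions):

#include <linux/types.h>
#include "libbpf_util.h"	/* provides libbpf_smp_rwmb() */

/* Illustrative single-producer/single-consumer ring. */
struct demo_ring {
	__u32 mask;
	__u32 *consumer;	/* index shared with the producer */
	__u64 *desc;		/* entries written by the producer */
};

static __u64 demo_ring_read_one(struct demo_ring *ring, __u32 idx)
{
	__u64 addr = ring->desc[idx & ring->mask];	/* load of the data */

	/* The store below must not be reordered before the load above,
	 * or the producer could overwrite the slot while it is still
	 * being read. On x86 this is only a compiler barrier; on
	 * arm/arm64 it falls back to a full dmb.
	 */
	libbpf_smp_rwmb();

	*ring->consumer = idx + 1;	/* store: hand the slot back */

	return addr;
}
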
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent b7e3a280
@@ -32,20 +32,25 @@ do { \
 # define libbpf_smp_wmb() asm volatile("" : : : "memory")
 # define libbpf_smp_mb() \
 	asm volatile("lock; addl $0,-4(%%rsp)" : : : "memory", "cc")
+/* Hinders stores to be observed before older loads. */
+# define libbpf_smp_rwmb() asm volatile("" : : : "memory")
 #elif defined(__aarch64__)
 # define libbpf_smp_rmb() asm volatile("dmb ishld" : : : "memory")
 # define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
 # define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
 #elif defined(__arm__)
 /* These are only valid for armv7 and above */
 # define libbpf_smp_rmb() asm volatile("dmb ish" : : : "memory")
 # define libbpf_smp_wmb() asm volatile("dmb ishst" : : : "memory")
 # define libbpf_smp_mb() asm volatile("dmb ish" : : : "memory")
+# define libbpf_smp_rwmb() libbpf_smp_mb()
 #else
 # warning Architecture missing native barrier functions in libbpf_util.h.
 # define libbpf_smp_rmb() __sync_synchronize()
 # define libbpf_smp_wmb() __sync_synchronize()
 # define libbpf_smp_mb() __sync_synchronize()
+# define libbpf_smp_rwmb() __sync_synchronize()
 #endif

 #ifdef __cplusplus
...
@@ -152,7 +152,7 @@ static inline void xsk_ring_cons__release(struct xsk_ring_cons *cons, size_t nb)
 	/* Make sure data has been read before indicating we are done
 	 * with the entries by updating the consumer pointer.
 	 */
-	libbpf_smp_mb();
+	libbpf_smp_rwmb();

 	*cons->consumer += nb;
 }
...
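
For readers tracing how this barrier is hit in practice, below is a minimal consumer-side sketch built on libbpf's xsk.h helpers (xsk_ring_cons__peek, xsk_ring_cons__comp_addr, xsk_ring_cons__release). It assumes the xsk.h API as it exists in this tree; drain_completions and the free-list handling are hypothetical application code, not part of libbpf.

#include <stddef.h>
#include <linux/types.h>
#include "xsk.h"	/* libbpf AF_XDP helpers; install path may differ */

/* Hypothetical application helper: recycle up to nb completed buffers
 * from a UMEM completion ring.
 */
static void drain_completions(struct xsk_ring_cons *cq, size_t nb)
{
	size_t i, rcvd;
	__u32 idx;

	/* Peek reserves rcvd entries starting at idx (it issues a read
	 * barrier internally before handing the entries to us).
	 */
	rcvd = xsk_ring_cons__peek(cq, nb, &idx);

	for (i = 0; i < rcvd; i++) {
		__u64 addr = *xsk_ring_cons__comp_addr(cq, idx + i);

		/* ... return addr to the application's free buffer list ... */
		(void)addr;
	}

	/* Release advances *cons->consumer. The libbpf_smp_rwmb() inside
	 * it keeps that store from being reordered before the loads of
	 * the completion addresses above, so the kernel-side producer
	 * cannot reuse the slots until they have actually been read.
	 */
	if (rcvd > 0)
		xsk_ring_cons__release(cq, rcvd);
}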