Commit e3144ff5 authored by David S. Miller

Merge branch 'rfs-lockless-annotate'

Eric Dumazet says:

====================
rfs: annotate lockless accesses

rfs runs without locks held, so we should annotate
reads and writes to shared variables.

It should prevent compilers from forcing writes
in the following situation:

  if (var != val)
     var = val;

A compiler could indeed simply drop the conditional and perform the store unconditionally:

    var = val;

This matters if var is shared between many cpus.

v2: aligns one closing bracket (Simon)
    adds Fixes: tags (Jakub)
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ab39b113 5c3b74a9
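
As an aside for readers of this merge (not part of the patches themselves): the
pattern being annotated is easy to reproduce in userspace C. Below is a minimal
sketch, assuming simplified READ_ONCE()/WRITE_ONCE() macros built from volatile
casts in the spirit of the kernel's; shared_ent and record_hint() are
hypothetical names standing in for table->ents[index] and rps_record_sock_flow().

    #include <stdio.h>

    /* Simplified stand-ins for the kernel macros: a volatile access makes
     * the compiler emit exactly one load or store, so it cannot collapse
     * "if (var != val) var = val;" into an unconditional store.
     */
    #define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, val)	(*(volatile typeof(x) *)&(x) = (val))

    static unsigned int shared_ent;	/* stands in for table->ents[index] */

    static void record_hint(unsigned int val)
    {
    	/* Only dirty the (possibly cpu-shared) cache line on change */
    	if (READ_ONCE(shared_ent) != val)
    		WRITE_ONCE(shared_ent, val);
    }

    int main(void)
    {
    	record_hint(42);
    	record_hint(42);	/* second call performs no store */
    	printf("%u\n", shared_ent);
    	return 0;
    }
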
@@ -768,8 +768,11 @@ static inline void rps_record_sock_flow(struct rps_sock_flow_table *table,
 		/* We only give a hint, preemption can change CPU under us */
 		val |= raw_smp_processor_id();
 
-		if (table->ents[index] != val)
-			table->ents[index] = val;
+		/* The following WRITE_ONCE() is paired with the READ_ONCE()
+		 * here, and another one in get_rps_cpu().
+		 */
+		if (READ_ONCE(table->ents[index]) != val)
+			WRITE_ONCE(table->ents[index], val);
 	}
 }
...
@@ -1152,8 +1152,12 @@ static inline void sock_rps_record_flow(const struct sock *sk)
 		 * OR	an additional socket flag
 		 * [1] : sk_state and sk_prot are in the same cache line.
 		 */
-		if (sk->sk_state == TCP_ESTABLISHED)
-			sock_rps_record_flow_hash(sk->sk_rxhash);
+		if (sk->sk_state == TCP_ESTABLISHED) {
+			/* This READ_ONCE() is paired with the WRITE_ONCE()
+			 * from sock_rps_save_rxhash() and sock_rps_reset_rxhash().
+			 */
+			sock_rps_record_flow_hash(READ_ONCE(sk->sk_rxhash));
+		}
 	}
 #endif
 }
@@ -1162,15 +1166,19 @@ static inline void sock_rps_save_rxhash(struct sock *sk,
 					const struct sk_buff *skb)
 {
 #ifdef CONFIG_RPS
-	if (unlikely(sk->sk_rxhash != skb->hash))
-		sk->sk_rxhash = skb->hash;
+	/* The following WRITE_ONCE() is paired with the READ_ONCE()
+	 * here, and another one in sock_rps_record_flow().
+	 */
+	if (unlikely(READ_ONCE(sk->sk_rxhash) != skb->hash))
+		WRITE_ONCE(sk->sk_rxhash, skb->hash);
 #endif
 }
 
 static inline void sock_rps_reset_rxhash(struct sock *sk)
 {
 #ifdef CONFIG_RPS
-	sk->sk_rxhash = 0;
+	/* Paired with READ_ONCE() in sock_rps_record_flow() */
+	WRITE_ONCE(sk->sk_rxhash, 0);
 #endif
 }
...
@@ -4471,8 +4471,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		u32 next_cpu;
 		u32 ident;
 
-		/* First check into global flow table if there is a match */
-		ident = sock_flow_table->ents[hash & sock_flow_table->mask];
+		/* First check into global flow table if there is a match.
+		 * This READ_ONCE() pairs with WRITE_ONCE() from rps_record_sock_flow().
+		 */
+		ident = READ_ONCE(sock_flow_table->ents[hash & sock_flow_table->mask]);
 		if ((ident ^ hash) & ~rps_cpu_mask)
 			goto try_rps;
...