Commit 5e6300e7 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

net: annotate data-races around sk->sk_forward_alloc

Every time sk->sk_forward_alloc is read locklessly,
add a READ_ONCE().

Add sk_forward_alloc_add() helper to centralize updates,
to reduce number of WRITE_ONCE().

Fixes: 1da177e4 ("Linux-2.6.12-rc2")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 66d58f04
...@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val) ...@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
} }
/* Adjust sk->sk_forward_alloc by @val (may be negative).
 * Lockless readers do READ_ONCE(sk->sk_forward_alloc); this WRITE_ONCE()
 * is the paired store that keeps those accesses data-race free.
 */
static inline void sk_forward_alloc_add(struct sock *sk, int val)
{
	int newval = sk->sk_forward_alloc + val;

	WRITE_ONCE(sk->sk_forward_alloc, newval);
}
void sk_stream_write_space(struct sock *sk); void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */ /* OOB backlog add */
...@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk) ...@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get) if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk); return sk->sk_prot->forward_alloc_get(sk);
#endif #endif
return sk->sk_forward_alloc; return READ_ONCE(sk->sk_forward_alloc);
} }
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake) static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
...@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size) ...@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{ {
if (!sk_has_account(sk)) if (!sk_has_account(sk))
return; return;
sk->sk_forward_alloc -= size; sk_forward_alloc_add(sk, -size);
} }
static inline void sk_mem_uncharge(struct sock *sk, int size) static inline void sk_mem_uncharge(struct sock *sk, int size)
{ {
if (!sk_has_account(sk)) if (!sk_has_account(sk))
return; return;
sk->sk_forward_alloc += size; sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk); sk_mem_reclaim(sk);
} }
......
...@@ -1045,7 +1045,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes) ...@@ -1045,7 +1045,7 @@ static int sock_reserve_memory(struct sock *sk, int bytes)
mem_cgroup_uncharge_skmem(sk->sk_memcg, pages); mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
return -ENOMEM; return -ENOMEM;
} }
sk->sk_forward_alloc += pages << PAGE_SHIFT; sk_forward_alloc_add(sk, pages << PAGE_SHIFT);
WRITE_ONCE(sk->sk_reserved_mem, WRITE_ONCE(sk->sk_reserved_mem,
sk->sk_reserved_mem + (pages << PAGE_SHIFT)); sk->sk_reserved_mem + (pages << PAGE_SHIFT));
...@@ -3139,10 +3139,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind) ...@@ -3139,10 +3139,10 @@ int __sk_mem_schedule(struct sock *sk, int size, int kind)
{ {
int ret, amt = sk_mem_pages(size); int ret, amt = sk_mem_pages(size);
sk->sk_forward_alloc += amt << PAGE_SHIFT; sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
ret = __sk_mem_raise_allocated(sk, size, amt, kind); ret = __sk_mem_raise_allocated(sk, size, amt, kind);
if (!ret) if (!ret)
sk->sk_forward_alloc -= amt << PAGE_SHIFT; sk_forward_alloc_add(sk, -(amt << PAGE_SHIFT));
return ret; return ret;
} }
EXPORT_SYMBOL(__sk_mem_schedule); EXPORT_SYMBOL(__sk_mem_schedule);
...@@ -3174,7 +3174,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) ...@@ -3174,7 +3174,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount)
void __sk_mem_reclaim(struct sock *sk, int amount) void __sk_mem_reclaim(struct sock *sk, int amount)
{ {
amount >>= PAGE_SHIFT; amount >>= PAGE_SHIFT;
sk->sk_forward_alloc -= amount << PAGE_SHIFT; sk_forward_alloc_add(sk, -(amount << PAGE_SHIFT));
__sk_mem_reduce_allocated(sk, amount); __sk_mem_reduce_allocated(sk, amount);
} }
EXPORT_SYMBOL(__sk_mem_reclaim); EXPORT_SYMBOL(__sk_mem_reclaim);
......
...@@ -3474,7 +3474,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size) ...@@ -3474,7 +3474,7 @@ void sk_forced_mem_schedule(struct sock *sk, int size)
if (delta <= 0) if (delta <= 0)
return; return;
amt = sk_mem_pages(delta); amt = sk_mem_pages(delta);
sk->sk_forward_alloc += amt << PAGE_SHIFT; sk_forward_alloc_add(sk, amt << PAGE_SHIFT);
sk_memory_allocated_add(sk, amt); sk_memory_allocated_add(sk, amt);
if (mem_cgroup_sockets_enabled && sk->sk_memcg) if (mem_cgroup_sockets_enabled && sk->sk_memcg)
......
...@@ -1414,9 +1414,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial, ...@@ -1414,9 +1414,9 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
spin_lock(&sk_queue->lock); spin_lock(&sk_queue->lock);
sk->sk_forward_alloc += size; sk_forward_alloc_add(sk, size);
amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1); amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
sk->sk_forward_alloc -= amt; sk_forward_alloc_add(sk, -amt);
if (amt) if (amt)
__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT); __sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
...@@ -1527,7 +1527,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb) ...@@ -1527,7 +1527,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
goto uncharge_drop; goto uncharge_drop;
} }
sk->sk_forward_alloc -= size; sk_forward_alloc_add(sk, -size);
/* no need to setup a destructor, we will explicitly release the /* no need to setup a destructor, we will explicitly release the
* forward allocated memory on dequeue * forward allocated memory on dequeue
......
...@@ -1800,7 +1800,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) ...@@ -1800,7 +1800,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
} }
/* data successfully copied into the write queue */ /* data successfully copied into the write queue */
sk->sk_forward_alloc -= total_ts; sk_forward_alloc_add(sk, -total_ts);
copied += psize; copied += psize;
dfrag->data_len += psize; dfrag->data_len += psize;
frag_truesize += psize; frag_truesize += psize;
...@@ -3257,7 +3257,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags) ...@@ -3257,7 +3257,7 @@ void mptcp_destroy_common(struct mptcp_sock *msk, unsigned int flags)
/* move all the rx fwd alloc into the sk_mem_reclaim_final in /* move all the rx fwd alloc into the sk_mem_reclaim_final in
* inet_sock_destruct() will dispose it * inet_sock_destruct() will dispose it
*/ */
sk->sk_forward_alloc += msk->rmem_fwd_alloc; sk_forward_alloc_add(sk, msk->rmem_fwd_alloc);
msk->rmem_fwd_alloc = 0; msk->rmem_fwd_alloc = 0;
mptcp_token_destroy(msk); mptcp_token_destroy(msk);
mptcp_pm_free_anno_list(msk); mptcp_pm_free_anno_list(msk);
...@@ -3522,7 +3522,7 @@ static void mptcp_shutdown(struct sock *sk, int how) ...@@ -3522,7 +3522,7 @@ static void mptcp_shutdown(struct sock *sk, int how)
static int mptcp_forward_alloc_get(const struct sock *sk) static int mptcp_forward_alloc_get(const struct sock *sk)
{ {
return sk->sk_forward_alloc + mptcp_sk(sk)->rmem_fwd_alloc; return READ_ONCE(sk->sk_forward_alloc) + mptcp_sk(sk)->rmem_fwd_alloc;
} }
static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v) static int mptcp_ioctl_outq(const struct mptcp_sock *msk, u64 v)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment