Commit bbc20b70 authored by Eric Dumazet, committed by Jakub Kicinski

net: reduce indentation level in sk_clone_lock()

Rework initial test to jump over init code
if memory allocation has failed.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://lore.kernel.org/r/20210127152731.748663-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent d1f3bdd4
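
The change applies a common early-exit refactoring: instead of nesting all of the copy and init work inside "if (newsk != NULL) { ... }", the function now tests for allocation failure first and jumps to the existing out: label, so the initialisation code drops one indentation level. Below is a minimal standalone sketch of the same pattern; the "widget" type and helper names are hypothetical and only illustrate the shape of the rework, they are not the kernel code.

    #include <stdlib.h>
    #include <string.h>

    struct widget {
            int id;
            char name[32];
    };

    /* Before: every line of init code sits inside the success branch. */
    static struct widget *widget_clone_nested(const struct widget *src)
    {
            struct widget *w = malloc(sizeof(*w));

            if (w != NULL) {
                    memcpy(w, src, sizeof(*w));
                    w->id = src->id + 1;
            }
            return w;
    }

    /* After: test the failure case first and jump over the init code,
     * mirroring the "if (!newsk) goto out;" structure in sk_clone_lock().
     */
    static struct widget *widget_clone_flat(const struct widget *src)
    {
            struct widget *w = malloc(sizeof(*w));

            if (!w)
                    goto out;

            memcpy(w, src, sizeof(*w));
            w->id = src->id + 1;
    out:
            return w;
    }

    int main(void)
    {
            struct widget base = { .id = 1, .name = "base" };
            struct widget *copy = widget_clone_flat(&base);
            int ok = copy && copy->id == 2;

            free(copy);     /* free(NULL) is a no-op, so this is safe on failure */
            return ok ? 0 : 1;
    }
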
@@ -1876,12 +1876,13 @@ static void sk_init_common(struct sock *sk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
 	struct proto *prot = READ_ONCE(sk->sk_prot);
-	struct sock *newsk;
+	struct sk_filter *filter;
 	bool is_charged = true;
+	struct sock *newsk;
 
 	newsk = sk_prot_alloc(prot, priority, sk->sk_family);
-	if (newsk != NULL) {
-		struct sk_filter *filter;
+	if (!newsk)
+		goto out;
 
 	sock_copy(newsk, sk);
 
@@ -1897,10 +1898,10 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	newsk->sk_backlog.len = 0;
 
 	atomic_set(&newsk->sk_rmem_alloc, 0);
-	/*
-	 * sk_wmem_alloc set to one (see sk_free() and sock_wfree())
-	 */
+
+	/* sk_wmem_alloc set to one (see sk_free() and sock_wfree()) */
 	refcount_set(&newsk->sk_wmem_alloc, 1);
+
 	atomic_set(&newsk->sk_omem_alloc, 0);
 	sk_init_common(newsk);
@@ -1963,15 +1964,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	if (likely(newsk->sk_net_refcnt))
 		sock_inuse_add(sock_net(newsk), 1);
 
-	/*
-	 * Before updating sk_refcnt, we must commit prior changes to memory
+	/* Before updating sk_refcnt, we must commit prior changes to memory
 	 * (Documentation/RCU/rculist_nulls.rst for details)
 	 */
 	smp_wmb();
 	refcount_set(&newsk->sk_refcnt, 2);
 
-	/*
-	 * Increment the counter in the same struct proto as the master
+	/* Increment the counter in the same struct proto as the master
 	 * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
 	 * is the same as sk->sk_prot->socks, as this field was copied
 	 * with memcpy).
@@ -1989,10 +1988,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	if (newsk->sk_prot->sockets_allocated)
 		sk_sockets_allocated_inc(newsk);
 
-	if (sock_needs_netstamp(sk) &&
-	    newsk->sk_flags & SK_FLAGS_TIMESTAMP)
+	if (sock_needs_netstamp(sk) && newsk->sk_flags & SK_FLAGS_TIMESTAMP)
 		net_enable_timestamp();
-	}
 out:
 	return newsk;
 }