Commit e8d00590 authored by Martin KaFai Lau, committed by Jakub Kicinski

net: inet: Open code inet_hash2 and inet_unhash2

This patch folds the lhash2-related functions into __inet_hash and
inet_unhash.  This will make the removal of the listening_hash
in a later patch easier to review.

First, this patch folds inet_hash2 into __inet_hash.
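In __inet_hash(), the ilb2 bucket is now locked inside the existing
ilb critical section and the portaddr-list insert happens inline.
A condensed sketch of the resulting flow, taken from the diff below
(reuseport handling and the IPv6 tail-add case omitted):

	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);

	spin_lock(&ilb->lock);
	spin_lock(&ilb2->lock);		/* lhash2 add shares the critical section */
	hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
			   &ilb2->head);	/* inlined from inet_hash2() */
	__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
	spin_unlock(&ilb2->lock);
	spin_unlock(&ilb->lock);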

For unhash, the current call sequence is
inet_unhash() => __inet_unhash() => inet_unhash2().
The checks in __inet_unhash() mostly concern a TCP_LISTEN sk, and
its caller inet_unhash() already tests for TCP_LISTEN, so this patch
folds both __inet_unhash() and inet_unhash2() into inet_unhash().
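A condensed sketch of the folded TCP_LISTEN path in inet_unhash(),
taken from the diff below (the ehash branch is analogous, without
the ilb/ilb2 locks and the portaddr-list delete):

	spin_lock(&ilb->lock);
	spin_lock(&ilb2->lock);
	if (sk_unhashed(sk)) {		/* check moved up from __inet_unhash() */
		spin_unlock(&ilb2->lock);
		spin_unlock(&ilb->lock);
		return;
	}
	if (rcu_access_pointer(sk->sk_reuseport_cb))
		reuseport_stop_listen_sock(sk);
	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);	/* inlined from inet_unhash2() */
	__sk_nulls_del_node_init_rcu(sk);
	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
	spin_unlock(&ilb2->lock);
	spin_unlock(&ilb->lock);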

Note that all listening_hash users also have lhash2 initialized,
so the !h->lhash2 check is no longer needed.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 8ea1eebb
@@ -193,40 +193,6 @@ inet_lhash2_bucket_sk(struct inet_hashinfo *h, struct sock *sk)
 	return inet_lhash2_bucket(h, hash);
 }
 
-static void inet_hash2(struct inet_hashinfo *h, struct sock *sk)
-{
-	struct inet_listen_hashbucket *ilb2;
-
-	if (!h->lhash2)
-		return;
-
-	ilb2 = inet_lhash2_bucket_sk(h, sk);
-
-	spin_lock(&ilb2->lock);
-	if (sk->sk_reuseport && sk->sk_family == AF_INET6)
-		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
-				   &ilb2->head);
-	else
-		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
-				   &ilb2->head);
-	spin_unlock(&ilb2->lock);
-}
-
-static void inet_unhash2(struct inet_hashinfo *h, struct sock *sk)
-{
-	struct inet_listen_hashbucket *ilb2;
-
-	if (!h->lhash2 ||
-	    WARN_ON_ONCE(hlist_unhashed(&inet_csk(sk)->icsk_listen_portaddr_node)))
-		return;
-
-	ilb2 = inet_lhash2_bucket_sk(h, sk);
-
-	spin_lock(&ilb2->lock);
-	hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
-	spin_unlock(&ilb2->lock);
-}
-
 static inline int compute_score(struct sock *sk, struct net *net,
 				const unsigned short hnum, const __be32 daddr,
 				const int dif, const int sdif)
@@ -631,6 +597,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
 int __inet_hash(struct sock *sk, struct sock *osk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
+	struct inet_listen_hashbucket *ilb2;
 	struct inet_listen_hashbucket *ilb;
 	int err = 0;
 
@@ -642,22 +609,29 @@ int __inet_hash(struct sock *sk, struct sock *osk)
 	}
 	WARN_ON(!sk_unhashed(sk));
 	ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+	ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
 
 	spin_lock(&ilb->lock);
+	spin_lock(&ilb2->lock);
 	if (sk->sk_reuseport) {
 		err = inet_reuseport_add_sock(sk, ilb);
 		if (err)
 			goto unlock;
 	}
 	if (IS_ENABLED(CONFIG_IPV6) && sk->sk_reuseport &&
-	    sk->sk_family == AF_INET6)
+	    sk->sk_family == AF_INET6) {
+		hlist_add_tail_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
 		__sk_nulls_add_node_tail_rcu(sk, &ilb->nulls_head);
-	else
+	} else {
+		hlist_add_head_rcu(&inet_csk(sk)->icsk_listen_portaddr_node,
+				   &ilb2->head);
 		__sk_nulls_add_node_rcu(sk, &ilb->nulls_head);
-	inet_hash2(hashinfo, sk);
+	}
 	sock_set_flag(sk, SOCK_RCU_FREE);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
 unlock:
+	spin_unlock(&ilb2->lock);
 	spin_unlock(&ilb->lock);
 
 	return err;
@@ -675,22 +649,6 @@ int inet_hash(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(inet_hash);
 
-static void __inet_unhash(struct sock *sk, struct inet_listen_hashbucket *ilb)
-{
-	if (sk_unhashed(sk))
-		return;
-
-	if (rcu_access_pointer(sk->sk_reuseport_cb))
-		reuseport_stop_listen_sock(sk);
-	if (ilb) {
-		struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
-
-		inet_unhash2(hashinfo, sk);
-	}
-	__sk_nulls_del_node_init_rcu(sk);
-	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
-}
-
 void inet_unhash(struct sock *sk)
 {
 	struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
@@ -699,20 +657,40 @@ void inet_unhash(struct sock *sk)
 		return;
 
 	if (sk->sk_state == TCP_LISTEN) {
+		struct inet_listen_hashbucket *ilb2;
 		struct inet_listen_hashbucket *ilb;
 
 		ilb = &hashinfo->listening_hash[inet_sk_listen_hashfn(sk)];
+		ilb2 = inet_lhash2_bucket_sk(hashinfo, sk);
+
 		/* Don't disable bottom halves while acquiring the lock to
 		 * avoid circular locking dependency on PREEMPT_RT.
 		 */
 		spin_lock(&ilb->lock);
-		__inet_unhash(sk, ilb);
+		spin_lock(&ilb2->lock);
+		if (sk_unhashed(sk)) {
+			spin_unlock(&ilb2->lock);
+			spin_unlock(&ilb->lock);
+			return;
+		}
+
+		if (rcu_access_pointer(sk->sk_reuseport_cb))
+			reuseport_stop_listen_sock(sk);
+
+		hlist_del_init_rcu(&inet_csk(sk)->icsk_listen_portaddr_node);
+		__sk_nulls_del_node_init_rcu(sk);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
+		spin_unlock(&ilb2->lock);
 		spin_unlock(&ilb->lock);
 	} else {
 		spinlock_t *lock = inet_ehash_lockp(hashinfo, sk->sk_hash);
 
 		spin_lock_bh(lock);
-		__inet_unhash(sk, NULL);
+		if (sk_unhashed(sk)) {
+			spin_unlock_bh(lock);
+			return;
+		}
+		__sk_nulls_del_node_init_rcu(sk);
+		sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 		spin_unlock_bh(lock);
 	}
 }