Commit 4408d55a authored by Kuniyuki Iwashima, committed by Alexei Starovoitov

af_unix: Refactor unix_next_socket().

Currently, unix_next_socket() is overloaded depending on its second argument.
If it is NULL, unix_next_socket() returns the first socket in the hash table.
If not, it returns the next socket in the same hash list, or the first socket
in the next non-empty hash list.
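
For reference, the two call shapes being collapsed are visible in the
seq_file hooks in the diff below:

	unix_next_socket(seq, NULL, pos);	/* unix_seq_start(): first socket */
	unix_next_socket(seq, v, pos);		/* unix_seq_next(): following sockets */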

This patch refactors unix_next_socket() into two functions, unix_get_first()
and unix_get_next().  unix_get_first() acquires a bucket lock and returns the
first socket in the list.  unix_get_next() returns the next socket in the same
list, or releases the lock and falls back to unix_get_first() for the next
bucket.
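
The resulting locking contract, as implemented in the diff below: whichever
helper returns a non-NULL socket leaves that socket's bucket lock held.  An
illustrative sketch of a caller (not code from this patch):

	sk = unix_get_first(seq, pos);	/* on success, returns with
					 * unix_table_locks[bucket] held */
	while (sk) {
		/* ... use sk under the bucket lock ... */
		sk = unix_get_next(seq, sk, pos);	/* unlocks only when
							 * crossing buckets */
	}
	/* sk == NULL: every bucket that was tried is unlocked again */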

In the following patch, the bpf iterator holds all the sockets in a list and
always releases the lock before .show().  It always calls unix_get_first() to
acquire the lock in each iteration, so this patch makes that change easier to
follow.
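
A minimal sketch of that upcoming pattern (hypothetical code, not part of
this commit; batch_sks() is a made-up helper):

	sk = unix_get_first(seq, pos);		/* takes the bucket lock */
	while (sk) {
		batch_sks(sk);			/* hypothetical: stash sk in a list */
		sk = unix_get_next(seq, sk, pos);
	}
	/* all locks are dropped here, so .show() can run without them */
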
Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.co.jp>
Link: https://lore.kernel.org/r/20220113002849.4384-2-kuniyu@amazon.co.jp
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 2a1aff60
@@ -3240,49 +3240,58 @@ static struct sock *unix_from_bucket(struct seq_file *seq, loff_t *pos)
 	return sk;
 }
 
-static struct sock *unix_next_socket(struct seq_file *seq,
-				     struct sock *sk,
-				     loff_t *pos)
+static struct sock *unix_get_first(struct seq_file *seq, loff_t *pos)
 {
 	unsigned long bucket = get_bucket(*pos);
+	struct sock *sk;
 
-	while (sk > (struct sock *)SEQ_START_TOKEN) {
-		sk = sk_next(sk);
-		if (!sk)
-			goto next_bucket;
-		if (sock_net(sk) == seq_file_net(seq))
-			return sk;
-	}
-
-	do {
+	while (bucket < ARRAY_SIZE(unix_socket_table)) {
 		spin_lock(&unix_table_locks[bucket]);
+
 		sk = unix_from_bucket(seq, pos);
 		if (sk)
 			return sk;
 
-next_bucket:
-		spin_unlock(&unix_table_locks[bucket++]);
-		*pos = set_bucket_offset(bucket, 1);
-	} while (bucket < ARRAY_SIZE(unix_socket_table));
+		spin_unlock(&unix_table_locks[bucket]);
+
+		*pos = set_bucket_offset(++bucket, 1);
+	}
 
 	return NULL;
 }
 
+static struct sock *unix_get_next(struct seq_file *seq, struct sock *sk,
+				  loff_t *pos)
+{
+	unsigned long bucket = get_bucket(*pos);
+
+	for (sk = sk_next(sk); sk; sk = sk_next(sk))
+		if (sock_net(sk) == seq_file_net(seq))
+			return sk;
+
+	spin_unlock(&unix_table_locks[bucket]);
+
+	*pos = set_bucket_offset(++bucket, 1);
+
+	return unix_get_first(seq, pos);
+}
+
 static void *unix_seq_start(struct seq_file *seq, loff_t *pos)
 {
 	if (!*pos)
 		return SEQ_START_TOKEN;
 
 	if (get_bucket(*pos) >= ARRAY_SIZE(unix_socket_table))
 		return NULL;
 
-	return unix_next_socket(seq, NULL, pos);
+	return unix_get_first(seq, pos);
 }
 
 static void *unix_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
 	++*pos;
-	return unix_next_socket(seq, v, pos);
+
+	if (v == SEQ_START_TOKEN)
+		return unix_get_first(seq, pos);
+
+	return unix_get_next(seq, v, pos);
 }
 
 static void unix_seq_stop(struct seq_file *seq, void *v)