Commit 1cf05e25 authored by Jakub Kicinski

Merge branch 'af_unix-random-improvements-for-gc'

Kuniyuki Iwashima says:

====================
af_unix: Random improvements for GC.

If more than 16000 inflight AF_UNIX sockets exist on a host, each
sendmsg() will be forced to wait for unix_gc() even if a process
is not sending any FD.

This series tries not to impose such a penalty on sane users who
do not send AF_UNIX FDs or do not have inflight sockets more than
SCM_MAX_FD * 8.

The first patch can be backported to -stable.

Cleanup patches for commit 69db702c ("io_uring/af_unix: disable
sending io_uring over sockets") and a large refactoring of the GC
will follow later.

v4: https://lore.kernel.org/netdev/20231219030102.27509-1-kuniyu@amazon.com/
v3: https://lore.kernel.org/netdev/20231218075020.60826-1-kuniyu@amazon.com/
v2: https://lore.kernel.org/netdev/20231123014747.66063-1-kuniyu@amazon.com/
v1: https://lore.kernel.org/netdev/20231122013629.28554-1-kuniyu@amazon.com/
====================

Link: https://lore.kernel.org/r/20240123170856.41348-1-kuniyu@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents fb4bb62a d9f21b36
...@@ -8,13 +8,21 @@ ...@@ -8,13 +8,21 @@
#include <linux/refcount.h> #include <linux/refcount.h>
#include <net/sock.h> #include <net/sock.h>
#if IS_ENABLED(CONFIG_UNIX)
struct unix_sock *unix_get_socket(struct file *filp);
#else
static inline struct unix_sock *unix_get_socket(struct file *filp)
{
return NULL;
}
#endif
void unix_inflight(struct user_struct *user, struct file *fp); void unix_inflight(struct user_struct *user, struct file *fp);
void unix_notinflight(struct user_struct *user, struct file *fp); void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_destruct_scm(struct sk_buff *skb); void unix_destruct_scm(struct sk_buff *skb);
void io_uring_destruct_scm(struct sk_buff *skb); void io_uring_destruct_scm(struct sk_buff *skb);
void unix_gc(void); void unix_gc(void);
void wait_for_unix_gc(void); void wait_for_unix_gc(struct scm_fp_list *fpl);
struct sock *unix_get_socket(struct file *filp);
struct sock *unix_peer_get(struct sock *sk); struct sock *unix_peer_get(struct sock *sk);
#define UNIX_HASH_MOD (256 - 1) #define UNIX_HASH_MOD (256 - 1)
...@@ -61,7 +69,7 @@ struct unix_sock { ...@@ -61,7 +69,7 @@ struct unix_sock {
struct mutex iolock, bindlock; struct mutex iolock, bindlock;
struct sock *peer; struct sock *peer;
struct list_head link; struct list_head link;
atomic_long_t inflight; unsigned long inflight;
spinlock_t lock; spinlock_t lock;
unsigned long gc_flags; unsigned long gc_flags;
#define UNIX_GC_CANDIDATE 0 #define UNIX_GC_CANDIDATE 0
......
...@@ -25,6 +25,7 @@ struct scm_creds { ...@@ -25,6 +25,7 @@ struct scm_creds {
struct scm_fp_list { struct scm_fp_list {
short count; short count;
short count_unix;
short max; short max;
struct user_struct *user; struct user_struct *user;
struct file *fp[SCM_MAX_FD]; struct file *fp[SCM_MAX_FD];
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#include <net/compat.h> #include <net/compat.h>
#include <net/scm.h> #include <net/scm.h>
#include <net/cls_cgroup.h> #include <net/cls_cgroup.h>
#include <net/af_unix.h>
/* /*
...@@ -85,6 +86,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) ...@@ -85,6 +86,7 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
return -ENOMEM; return -ENOMEM;
*fplp = fpl; *fplp = fpl;
fpl->count = 0; fpl->count = 0;
fpl->count_unix = 0;
fpl->max = SCM_MAX_FD; fpl->max = SCM_MAX_FD;
fpl->user = NULL; fpl->user = NULL;
} }
...@@ -109,6 +111,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp) ...@@ -109,6 +111,9 @@ static int scm_fp_copy(struct cmsghdr *cmsg, struct scm_fp_list **fplp)
fput(file); fput(file);
return -EINVAL; return -EINVAL;
} }
if (unix_get_socket(file))
fpl->count_unix++;
*fpp++ = file; *fpp++ = file;
fpl->count++; fpl->count++;
} }
......
...@@ -994,10 +994,10 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, ...@@ -994,10 +994,10 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern,
sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen; sk->sk_max_ack_backlog = net->unx.sysctl_max_dgram_qlen;
sk->sk_destruct = unix_sock_destructor; sk->sk_destruct = unix_sock_destructor;
u = unix_sk(sk); u = unix_sk(sk);
u->inflight = 0;
u->path.dentry = NULL; u->path.dentry = NULL;
u->path.mnt = NULL; u->path.mnt = NULL;
spin_lock_init(&u->lock); spin_lock_init(&u->lock);
atomic_long_set(&u->inflight, 0);
INIT_LIST_HEAD(&u->link); INIT_LIST_HEAD(&u->link);
mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->iolock); /* single task reading lock */
mutex_init(&u->bindlock); /* single task binding lock */ mutex_init(&u->bindlock); /* single task binding lock */
...@@ -1923,11 +1923,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -1923,11 +1923,12 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
long timeo; long timeo;
int err; int err;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false); err = scm_send(sock, msg, &scm, false);
if (err < 0) if (err < 0)
return err; return err;
wait_for_unix_gc(scm.fp);
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
if (msg->msg_flags&MSG_OOB) if (msg->msg_flags&MSG_OOB)
goto out; goto out;
...@@ -2199,11 +2200,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg, ...@@ -2199,11 +2200,12 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
bool fds_sent = false; bool fds_sent = false;
int data_len; int data_len;
wait_for_unix_gc();
err = scm_send(sock, msg, &scm, false); err = scm_send(sock, msg, &scm, false);
if (err < 0) if (err < 0)
return err; return err;
wait_for_unix_gc(scm.fp);
err = -EOPNOTSUPP; err = -EOPNOTSUPP;
if (msg->msg_flags & MSG_OOB) { if (msg->msg_flags & MSG_OOB) {
#if IS_ENABLED(CONFIG_AF_UNIX_OOB) #if IS_ENABLED(CONFIG_AF_UNIX_OOB)
......
...@@ -86,7 +86,6 @@ ...@@ -86,7 +86,6 @@
/* Internal data structures and random procedures: */ /* Internal data structures and random procedures: */
static LIST_HEAD(gc_candidates); static LIST_HEAD(gc_candidates);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist) struct sk_buff_head *hitlist)
...@@ -105,22 +104,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *), ...@@ -105,22 +104,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
while (nfd--) { while (nfd--) {
/* Get the socket the fd matches if it indeed does so */ /* Get the socket the fd matches if it indeed does so */
struct sock *sk = unix_get_socket(*fp++); struct unix_sock *u = unix_get_socket(*fp++);
if (sk) { /* Ignore non-candidates, they could have been added
struct unix_sock *u = unix_sk(sk); * to the queues after starting the garbage collection
/* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/ */
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) { if (u && test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
hit = true; hit = true;
func(u); func(u);
} }
} }
}
if (hit && hitlist != NULL) { if (hit && hitlist != NULL) {
__skb_unlink(skb, &x->sk_receive_queue); __skb_unlink(skb, &x->sk_receive_queue);
__skb_queue_tail(hitlist, skb); __skb_queue_tail(hitlist, skb);
...@@ -166,17 +160,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *), ...@@ -166,17 +160,18 @@ static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
static void dec_inflight(struct unix_sock *usk) static void dec_inflight(struct unix_sock *usk)
{ {
atomic_long_dec(&usk->inflight); usk->inflight--;
} }
static void inc_inflight(struct unix_sock *usk) static void inc_inflight(struct unix_sock *usk)
{ {
atomic_long_inc(&usk->inflight); usk->inflight++;
} }
static void inc_inflight_move_tail(struct unix_sock *u) static void inc_inflight_move_tail(struct unix_sock *u)
{ {
atomic_long_inc(&u->inflight); u->inflight++;
/* If this still might be part of a cycle, move it to the end /* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already * of the list, so that it's checked even if it was already
* passed over * passed over
...@@ -186,23 +181,8 @@ static void inc_inflight_move_tail(struct unix_sock *u) ...@@ -186,23 +181,8 @@ static void inc_inflight_move_tail(struct unix_sock *u)
} }
static bool gc_in_progress; static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000
void wait_for_unix_gc(void) static void __unix_gc(struct work_struct *work)
{
/* If number of inflight sockets is insane,
* force a garbage collect right now.
* Paired with the WRITE_ONCE() in unix_inflight(),
* unix_notinflight() and gc_in_progress().
*/
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
wait_event(unix_gc_wait, gc_in_progress == false);
}
/* The external entry point: unix_gc() */
void unix_gc(void)
{ {
struct sk_buff *next_skb, *skb; struct sk_buff *next_skb, *skb;
struct unix_sock *u; struct unix_sock *u;
...@@ -213,13 +193,6 @@ void unix_gc(void) ...@@ -213,13 +193,6 @@ void unix_gc(void)
spin_lock(&unix_gc_lock); spin_lock(&unix_gc_lock);
/* Avoid a recursive GC. */
if (gc_in_progress)
goto out;
/* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, true);
/* First, select candidates for garbage collection. Only /* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones * in-flight sockets are considered, and from those only ones
* which don't have any external reference. * which don't have any external reference.
...@@ -237,14 +210,12 @@ void unix_gc(void) ...@@ -237,14 +210,12 @@ void unix_gc(void)
*/ */
list_for_each_entry_safe(u, next, &gc_inflight_list, link) { list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
long total_refs; long total_refs;
long inflight_refs;
total_refs = file_count(u->sk.sk_socket->file); total_refs = file_count(u->sk.sk_socket->file);
inflight_refs = atomic_long_read(&u->inflight);
BUG_ON(inflight_refs < 1); BUG_ON(!u->inflight);
BUG_ON(total_refs < inflight_refs); BUG_ON(total_refs < u->inflight);
if (total_refs == inflight_refs) { if (total_refs == u->inflight) {
list_move_tail(&u->link, &gc_candidates); list_move_tail(&u->link, &gc_candidates);
__set_bit(UNIX_GC_CANDIDATE, &u->gc_flags); __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
__set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
...@@ -271,7 +242,7 @@ void unix_gc(void) ...@@ -271,7 +242,7 @@ void unix_gc(void)
/* Move cursor to after the current position. */ /* Move cursor to after the current position. */
list_move(&cursor, &u->link); list_move(&cursor, &u->link);
if (atomic_long_read(&u->inflight) > 0) { if (u->inflight) {
list_move_tail(&u->link, &not_cycle_list); list_move_tail(&u->link, &not_cycle_list);
__clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags); __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
scan_children(&u->sk, inc_inflight_move_tail, NULL); scan_children(&u->sk, inc_inflight_move_tail, NULL);
...@@ -328,8 +299,39 @@ void unix_gc(void) ...@@ -328,8 +299,39 @@ void unix_gc(void)
/* Paired with READ_ONCE() in wait_for_unix_gc(). */ /* Paired with READ_ONCE() in wait_for_unix_gc(). */
WRITE_ONCE(gc_in_progress, false); WRITE_ONCE(gc_in_progress, false);
wake_up(&unix_gc_wait);
out:
spin_unlock(&unix_gc_lock); spin_unlock(&unix_gc_lock);
} }
static DECLARE_WORK(unix_gc_work, __unix_gc);
void unix_gc(void)
{
WRITE_ONCE(gc_in_progress, true);
queue_work(system_unbound_wq, &unix_gc_work);
}
#define UNIX_INFLIGHT_TRIGGER_GC 16000
#define UNIX_INFLIGHT_SANE_USER (SCM_MAX_FD * 8)
void wait_for_unix_gc(struct scm_fp_list *fpl)
{
/* If number of inflight sockets is insane,
* force a garbage collect right now.
*
* Paired with the WRITE_ONCE() in unix_inflight(),
* unix_notinflight(), and __unix_gc().
*/
if (READ_ONCE(unix_tot_inflight) > UNIX_INFLIGHT_TRIGGER_GC &&
!READ_ONCE(gc_in_progress))
unix_gc();
/* Penalise users who want to send AF_UNIX sockets
* but whose sockets have not been received yet.
*/
if (!fpl || !fpl->count_unix ||
READ_ONCE(fpl->user->unix_inflight) < UNIX_INFLIGHT_SANE_USER)
return;
if (READ_ONCE(gc_in_progress))
flush_work(&unix_gc_work);
}
...@@ -21,9 +21,8 @@ EXPORT_SYMBOL(gc_inflight_list); ...@@ -21,9 +21,8 @@ EXPORT_SYMBOL(gc_inflight_list);
DEFINE_SPINLOCK(unix_gc_lock); DEFINE_SPINLOCK(unix_gc_lock);
EXPORT_SYMBOL(unix_gc_lock); EXPORT_SYMBOL(unix_gc_lock);
struct sock *unix_get_socket(struct file *filp) struct unix_sock *unix_get_socket(struct file *filp)
{ {
struct sock *u_sock = NULL;
struct inode *inode = file_inode(filp); struct inode *inode = file_inode(filp);
/* Socket ? */ /* Socket ? */
...@@ -34,10 +33,10 @@ struct sock *unix_get_socket(struct file *filp) ...@@ -34,10 +33,10 @@ struct sock *unix_get_socket(struct file *filp)
/* PF_UNIX ? */ /* PF_UNIX ? */
if (s && ops && ops->family == PF_UNIX) if (s && ops && ops->family == PF_UNIX)
u_sock = s; return unix_sk(s);
} }
return u_sock; return NULL;
} }
EXPORT_SYMBOL(unix_get_socket); EXPORT_SYMBOL(unix_get_socket);
...@@ -46,19 +45,18 @@ EXPORT_SYMBOL(unix_get_socket); ...@@ -46,19 +45,18 @@ EXPORT_SYMBOL(unix_get_socket);
*/ */
void unix_inflight(struct user_struct *user, struct file *fp) void unix_inflight(struct user_struct *user, struct file *fp)
{ {
struct sock *s = unix_get_socket(fp); struct unix_sock *u = unix_get_socket(fp);
spin_lock(&unix_gc_lock); spin_lock(&unix_gc_lock);
if (s) { if (u) {
struct unix_sock *u = unix_sk(s); if (!u->inflight) {
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link)); BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list); list_add_tail(&u->link, &gc_inflight_list);
} else { } else {
BUG_ON(list_empty(&u->link)); BUG_ON(list_empty(&u->link));
} }
u->inflight++;
/* Paired with READ_ONCE() in wait_for_unix_gc() */ /* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1); WRITE_ONCE(unix_tot_inflight, unix_tot_inflight + 1);
} }
...@@ -68,17 +66,16 @@ void unix_inflight(struct user_struct *user, struct file *fp) ...@@ -68,17 +66,16 @@ void unix_inflight(struct user_struct *user, struct file *fp)
void unix_notinflight(struct user_struct *user, struct file *fp) void unix_notinflight(struct user_struct *user, struct file *fp)
{ {
struct sock *s = unix_get_socket(fp); struct unix_sock *u = unix_get_socket(fp);
spin_lock(&unix_gc_lock); spin_lock(&unix_gc_lock);
if (s) { if (u) {
struct unix_sock *u = unix_sk(s); BUG_ON(!u->inflight);
BUG_ON(!atomic_long_read(&u->inflight));
BUG_ON(list_empty(&u->link)); BUG_ON(list_empty(&u->link));
if (atomic_long_dec_and_test(&u->inflight)) u->inflight--;
if (!u->inflight)
list_del_init(&u->link); list_del_init(&u->link);
/* Paired with READ_ONCE() in wait_for_unix_gc() */ /* Paired with READ_ONCE() in wait_for_unix_gc() */
WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1); WRITE_ONCE(unix_tot_inflight, unix_tot_inflight - 1);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment