Commit a3e4abac authored by David S. Miller

Merge branch 'SO_RESERVE_MEM'

Wei Wang says:

====================
net: add new socket option SO_RESERVE_MEM

This patch series introduces a new socket option, SO_RESERVE_MEM.
The option provides a mechanism for users to reserve a certain amount
of memory for a socket to use. When the option is set, the kernel
charges the user-specified amount of memory to memcg, as well as to
sk_forward_alloc. This memory is not reclaimable and remains available
in sk_forward_alloc for this socket.
With this socket option set, the networking stack spends fewer cycles
doing forward alloc and reclaim, which should lead to better system
performance, at the cost of a fixed amount of pre-allocated,
unreclaimable memory that is held even under memory pressure.
In a tcp_stream test with 10 flows running over a simulated 100ms RTT
link, I can see the cycles spent in __sk_mem_raise_allocated() drop by
~0.02%. Not a whole lot, since we already have logic in
sk_mem_uncharge() to only reclaim 1MB when sk_forward_alloc has more
than 2MB of free space. But on a system constantly under memory
pressure, the savings should be larger.
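
As a rough usage illustration (not part of the patches themselves), the
reservation is requested with a plain SOL_SOCKET setsockopt(); the value
is in bytes and the kernel rounds it up to whole SK_MEM_QUANTUM pages:

	#include <stdio.h>
	#include <sys/socket.h>
	#include <netinet/in.h>

	#ifndef SO_RESERVE_MEM
	#define SO_RESERVE_MEM 73	/* value added by this series */
	#endif

	int main(void)
	{
		int fd = socket(AF_INET, SOCK_STREAM, 0);
		int reserve = 100 * 1024;	/* bytes to pre-charge for this socket */

		/* Fails with EOPNOTSUPP when memcg socket accounting is not
		 * available, or ENOMEM when the pre-charge would push the
		 * protocol into memory pressure.
		 */
		if (setsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM,
			       &reserve, sizeof(reserve)) < 0)
			perror("setsockopt(SO_RESERVE_MEM)");
		return 0;
	}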

The first patch implements the socket option itself. The following two
patches change the TCP stack to make use of this reserved memory when
under memory pressure. This makes TCP behavior under memory pressure
more flexible, and gives the user a way to control how memory is
distributed among its sockets.
With a TCP connection on a simulated 100ms RTT link, the default
throughput under memory pressure is ~500Kbps. With SO_RESERVE_MEM set to
100KB, throughput under memory pressure goes up to ~3.5Mbps.
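
The effective (page-rounded) reservation can be read back with
getsockopt(); a small, illustrative sketch continuing the example above:

	int val = 0;
	socklen_t len = sizeof(val);

	/* Returns sk->sk_reserved_mem, i.e. the reservation rounded up to
	 * whole SK_MEM_QUANTUM pages.
	 */
	if (getsockopt(fd, SOL_SOCKET, SO_RESERVE_MEM, &val, &len) == 0)
		printf("reserved %d bytes\n", val);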

Changes since v2:
- Added a description for the new field added to struct sock in patch 1
Changes since v1:
- Added performance stats to the cover letter and rebased
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 4075a6a0 053f3684
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -269,6 +269,7 @@ struct bpf_local_storage;
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
+ *	@sk_reserved_mem: space reserved and non-reclaimable for the socket
  *	@sk_napi_id: id of the last napi context to receive data for sk
  *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
@@ -409,6 +410,7 @@ struct sock {
 #define sk_rmem_alloc sk_backlog.rmem_alloc
 	int			sk_forward_alloc;
+	u32			sk_reserved_mem;
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	unsigned int		sk_ll_usec;
 	/* ===== mostly read cache line ===== */
@@ -1511,20 +1513,49 @@ sk_rmem_schedule(struct sock *sk, struct sk_buff *skb, int size)
 	       skb_pfmemalloc(skb);
 }
 
+static inline int sk_unused_reserved_mem(const struct sock *sk)
+{
+	int unused_mem;
+
+	if (likely(!sk->sk_reserved_mem))
+		return 0;
+
+	unused_mem = sk->sk_reserved_mem - sk->sk_wmem_queued -
+		     atomic_read(&sk->sk_rmem_alloc);
+
+	return unused_mem > 0 ? unused_mem : 0;
+}
+
 static inline void sk_mem_reclaim(struct sock *sk)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
-	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc);
+
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+	if (reclaimable >= SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, reclaimable);
+}
+
+static inline void sk_mem_reclaim_final(struct sock *sk)
+{
+	sk->sk_reserved_mem = 0;
+	sk_mem_reclaim(sk);
 }
 
 static inline void sk_mem_reclaim_partial(struct sock *sk)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
-	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
-		__sk_mem_reclaim(sk, sk->sk_forward_alloc - 1);
+
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
+
+	if (reclaimable > SK_MEM_QUANTUM)
+		__sk_mem_reclaim(sk, reclaimable - 1);
 }
 
 static inline void sk_mem_charge(struct sock *sk, int size)
@@ -1536,9 +1567,12 @@ static inline void sk_mem_charge(struct sock *sk, int size)
 
 static inline void sk_mem_uncharge(struct sock *sk, int size)
 {
+	int reclaimable;
+
 	if (!sk_has_account(sk))
 		return;
 	sk->sk_forward_alloc += size;
+	reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);
 
 	/* Avoid a possible overflow.
 	 * TCP send queues can make this happen, if sk_mem_reclaim()
@@ -1547,7 +1581,7 @@ static inline void sk_mem_uncharge(struct sock *sk, int size)
 	 * If we reach 2 MBytes, reclaim 1 MBytes right now, there is
 	 * no need to hold that much forward allocation anyway.
 	 */
-	if (unlikely(sk->sk_forward_alloc >= 1 << 21))
+	if (unlikely(reclaimable >= 1 << 21))
 		__sk_mem_reclaim(sk, 1 << 20);
 }
 
@@ -2344,6 +2378,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 		return;
 
 	val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
+	val = max_t(u32, val, sk_unused_reserved_mem(sk));
 
 	WRITE_ONCE(sk->sk_sndbuf, max_t(u32, val, SOCK_MIN_SNDBUF));
 }
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1421,6 +1421,17 @@ static inline int tcp_full_space(const struct sock *sk)
 	return tcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf));
 }
 
+static inline void tcp_adjust_rcv_ssthresh(struct sock *sk)
+{
+	int unused_mem = sk_unused_reserved_mem(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+	if (unused_mem)
+		tp->rcv_ssthresh = max_t(u32, tp->rcv_ssthresh,
+					 tcp_win_from_space(sk, unused_mem));
+}
+
 void tcp_cleanup_rbuf(struct sock *sk, int copied);
 
 /* We provision sk_rcvbuf around 200% of sk_rcvlowat.
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -126,6 +126,8 @@
 
 #define SO_BUF_LOCK		72
 
+#define SO_RESERVE_MEM		73
+
 #if !defined(__KERNEL__)
 
 #if __BITS_PER_LONG == 64 || (defined(__x86_64__) && defined(__ILP32__))
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -947,6 +947,53 @@ void sock_set_mark(struct sock *sk, u32 val)
 }
 EXPORT_SYMBOL(sock_set_mark);
 
+static void sock_release_reserved_memory(struct sock *sk, int bytes)
+{
+	/* Round down bytes to multiple of pages */
+	bytes &= ~(SK_MEM_QUANTUM - 1);
+
+	WARN_ON(bytes > sk->sk_reserved_mem);
+	sk->sk_reserved_mem -= bytes;
+	sk_mem_reclaim(sk);
+}
+
+static int sock_reserve_memory(struct sock *sk, int bytes)
+{
+	long allocated;
+	bool charged;
+	int pages;
+
+	if (!mem_cgroup_sockets_enabled || !sk->sk_memcg)
+		return -EOPNOTSUPP;
+
+	if (!bytes)
+		return 0;
+
+	pages = sk_mem_pages(bytes);
+
+	/* pre-charge to memcg */
+	charged = mem_cgroup_charge_skmem(sk->sk_memcg, pages,
+					  GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	if (!charged)
+		return -ENOMEM;
+
+	/* pre-charge to forward_alloc */
+	allocated = sk_memory_allocated_add(sk, pages);
+	/* If the system goes into memory pressure with this
+	 * precharge, give up and return error.
+	 */
+	if (allocated > sk_prot_mem_limits(sk, 1)) {
+		sk_memory_allocated_sub(sk, pages);
+		mem_cgroup_uncharge_skmem(sk->sk_memcg, pages);
+		return -ENOMEM;
+	}
+	sk->sk_forward_alloc += pages << SK_MEM_QUANTUM_SHIFT;
+
+	sk->sk_reserved_mem += pages << SK_MEM_QUANTUM_SHIFT;
+
+	return 0;
+}
+
 /*
  *	This is meant for all protocols to use and covers goings on
  *	at the socket level. Everything here is generic.
@@ -1367,6 +1414,23 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 					  ~SOCK_BUF_LOCK_MASK);
 		break;
 
+	case SO_RESERVE_MEM:
+	{
+		int delta;
+
+		if (val < 0) {
+			ret = -EINVAL;
+			break;
+		}
+
+		delta = val - sk->sk_reserved_mem;
+		if (delta < 0)
+			sock_release_reserved_memory(sk, -delta);
+		else
+			ret = sock_reserve_memory(sk, delta);
+		break;
+	}
+
 	default:
 		ret = -ENOPROTOOPT;
 		break;
@@ -1733,6 +1797,10 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 		v.val = sk->sk_userlocks & SOCK_BUF_LOCK_MASK;
 		break;
 
+	case SO_RESERVE_MEM:
+		v.val = sk->sk_reserved_mem;
+		break;
+
 	default:
 		/* We implement the SO_SNDLOWAT etc to not be settable
 		 * (1003.1g 7).
@@ -2045,6 +2113,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 	newsk->sk_dst_pending_confirm = 0;
 	newsk->sk_wmem_queued	= 0;
 	newsk->sk_forward_alloc	= 0;
+	newsk->sk_reserved_mem	= 0;
 	atomic_set(&newsk->sk_drops, 0);
 	newsk->sk_send_head	= NULL;
 	newsk->sk_userlocks	= sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
--- a/net/core/stream.c
+++ b/net/core/stream.c
@@ -202,7 +202,7 @@ void sk_stream_kill_queues(struct sock *sk)
 	WARN_ON(!skb_queue_empty(&sk->sk_write_queue));
 
 	/* Account for returned memory. */
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_final(sk);
 
 	WARN_ON(sk->sk_wmem_queued);
 	WARN_ON(sk->sk_forward_alloc);
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -135,7 +135,7 @@ void inet_sock_destruct(struct sock *sk)
 	__skb_queue_purge(&sk->sk_receive_queue);
 	__skb_queue_purge(&sk->sk_error_queue);
 
-	sk_mem_reclaim(sk);
+	sk_mem_reclaim_final(sk);
 
 	if (sk->sk_type == SOCK_STREAM && sk->sk_state != TCP_CLOSE) {
 		pr_err("Attempt to release TCP socket in state %d %p\n",
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -500,8 +500,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 
 	room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh;
 
+	if (room <= 0)
+		return;
+
 	/* Check #1 */
-	if (room > 0 && !tcp_under_memory_pressure(sk)) {
+	if (!tcp_under_memory_pressure(sk)) {
 		unsigned int truesize = truesize_adjust(adjust, skb);
 		int incr;
 
@@ -518,6 +521,11 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb,
 			tp->rcv_ssthresh += min(room, incr);
 			inet_csk(sk)->icsk_ack.quick |= 1;
 		}
+	} else {
+		/* Under pressure:
+		 * Adjust rcv_ssthresh according to reserved mem
+		 */
+		tcp_adjust_rcv_ssthresh(sk);
 	}
 }
 
@@ -5345,7 +5353,7 @@ static int tcp_prune_queue(struct sock *sk)
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
 	else if (tcp_under_memory_pressure(sk))
-		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+		tcp_adjust_rcv_ssthresh(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;
@@ -5380,7 +5388,7 @@
 	return -1;
 }
 
-static bool tcp_should_expand_sndbuf(const struct sock *sk)
+static bool tcp_should_expand_sndbuf(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
@@ -5391,8 +5399,18 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we are under global TCP memory pressure, do not expand. */
-	if (tcp_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk)) {
+		int unused_mem = sk_unused_reserved_mem(sk);
+
+		/* Adjust sndbuf according to reserved mem. But make sure
+		 * it never goes below SOCK_MIN_SNDBUF.
+		 * See sk_stream_moderate_sndbuf() for more details.
+		 */
+		if (unused_mem > SOCK_MIN_SNDBUF)
+			WRITE_ONCE(sk->sk_sndbuf, unused_mem);
+
 		return false;
+	}
 
 	/* If we are under soft global TCP memory pressure, do not expand. */
 	if (sk_memory_allocated(sk) >= sk_prot_mem_limits(sk, 0))
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2967,8 +2967,7 @@ u32 __tcp_select_window(struct sock *sk)
 		icsk->icsk_ack.quick = 0;
 
 		if (tcp_under_memory_pressure(sk))
-			tp->rcv_ssthresh = min(tp->rcv_ssthresh,
-					       4U * tp->advmss);
+			tcp_adjust_rcv_ssthresh(sk);
 
 		/* free_space might become our new window, make sure we don't
 		 * increase it due to wscale.