Commit 12a686c2 authored by Adam Li, committed by David S. Miller

net: make SK_MEMORY_PCPU_RESERVE tunable

This patch adds the /proc/sys/net/core/mem_pcpu_rsv sysctl file
to make SK_MEMORY_PCPU_RESERVE tunable.

Commit 3cd3399d ("net: implement per-cpu reserves for
memory_allocated") introduced per-cpu forward alloc cache:

"Implement a per-cpu cache of +1/-1 MB, to reduce number
of changes to sk->sk_prot->memory_allocated, which
would otherwise be cause of false sharing."

sk_prot->memory_allocated points to global atomic variable:
atomic_long_t tcp_memory_allocated ____cacheline_aligned_in_smp;
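
For intuition, here is a minimal userspace analogue of that batching scheme,
not the kernel implementation itself: each thread accumulates a local delta
and folds it into the shared atomic counter only when the delta crosses a
threshold, so the contended cache line is updated far less often. All names
and the threshold value below are illustrative.

    /* Userspace sketch of the per-cpu reserve idea, using per-thread state. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long memory_allocated;       /* shared, contended counter */
    static _Thread_local long local_reserve;   /* cheap thread-local cache  */
    static long pcpu_rsv = 256;                /* flush threshold, in pages */

    static void memory_allocated_add(long amt) /* amt may be negative */
    {
        local_reserve += amt;
        if (local_reserve >= pcpu_rsv || local_reserve <= -pcpu_rsv) {
            /* Fold the batched delta into the shared counter. */
            atomic_fetch_add(&memory_allocated, local_reserve);
            local_reserve = 0;
        }
    }

    int main(void)
    {
        for (int i = 0; i < 1000; i++)
            memory_allocated_add(1);   /* 3 atomic RMWs instead of 1000 */
        printf("global = %ld, local = %ld\n",
               atomic_load(&memory_allocated), local_reserve);
        return 0;
    }
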

By increasing the per-cpu cache size from 1MB to e.g. 16MB, the number of
changes to sk->sk_prot->memory_allocated can be reduced further. Performance
may improve on systems with many cores.
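
The sysctl value is expressed in page units (see the SK_MEMORY_PCPU_RESERVE
definition below), so the 16MB figure above corresponds to a page count. A
quick arithmetic sketch, assuming the common 4KB page size (PAGE_SHIFT == 12);
mb_to_pages is a made-up helper, not a kernel API:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumes 4 KB pages */

    /* Convert a size in MB to the page-unit value mem_pcpu_rsv expects. */
    static unsigned int mb_to_pages(unsigned int mb)
    {
        return mb << (20 - PAGE_SHIFT);
    }

    int main(void)
    {
        printf("1 MB  -> %u pages\n", mb_to_pages(1));   /* 256, the default */
        printf("16 MB -> %u pages\n", mb_to_pages(16));  /* 4096 */
        return 0;
    }

With 4KB pages, writing 4096 to /proc/sys/net/core/mem_pcpu_rsv therefore
selects the 16MB per-cpu reserve discussed above.
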
Signed-off-by: Adam Li <adamli@os.amperecomputing.com>
Reviewed-by: Christoph Lameter (Ampere) <cl@linux.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d4beef41
@@ -206,6 +206,11 @@ Will increase power usage.
 
 Default: 0 (off)
 
+mem_pcpu_rsv
+------------
+
+Per-cpu reserved forward alloc cache size in page units. Default 1MB per CPU.
+
 rmem_default
 ------------
@@ -1443,6 +1443,7 @@ sk_memory_allocated(const struct sock *sk)
 
 /* 1 MB per cpu, in page units */
 #define SK_MEMORY_PCPU_RESERVE (1 << (20 - PAGE_SHIFT))
+extern int sysctl_mem_pcpu_rsv;
 
 static inline void
 sk_memory_allocated_add(struct sock *sk, int amt)
@@ -1451,7 +1452,7 @@ sk_memory_allocated_add(struct sock *sk, int amt)
 
 	preempt_disable();
 	local_reserve = __this_cpu_add_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve >= SK_MEMORY_PCPU_RESERVE) {
+	if (local_reserve >= READ_ONCE(sysctl_mem_pcpu_rsv)) {
 		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
 		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
 	}
@@ -1465,7 +1466,7 @@ sk_memory_allocated_sub(struct sock *sk, int amt)
 
 	preempt_disable();
 	local_reserve = __this_cpu_sub_return(*sk->sk_prot->per_cpu_fw_alloc, amt);
-	if (local_reserve <= -SK_MEMORY_PCPU_RESERVE) {
+	if (local_reserve <= -READ_ONCE(sysctl_mem_pcpu_rsv)) {
 		__this_cpu_sub(*sk->sk_prot->per_cpu_fw_alloc, local_reserve);
 		atomic_long_add(local_reserve, sk->sk_prot->memory_allocated);
 	}
@@ -283,6 +283,7 @@ __u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
 EXPORT_SYMBOL(sysctl_rmem_max);
 __u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
 __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+int sysctl_mem_pcpu_rsv __read_mostly = SK_MEMORY_PCPU_RESERVE;
 
 int sysctl_tstamp_allow_data __read_mostly = 1;
@@ -30,6 +30,7 @@ static int int_3600 = 3600;
 static int min_sndbuf = SOCK_MIN_SNDBUF;
 static int min_rcvbuf = SOCK_MIN_RCVBUF;
 static int max_skb_frags = MAX_SKB_FRAGS;
+static int min_mem_pcpu_rsv = SK_MEMORY_PCPU_RESERVE;
 
 static int net_msg_warn;	/* Unused, but still a sysctl */
@@ -407,6 +408,14 @@ static struct ctl_table net_core_table[] = {
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &min_rcvbuf,
 	},
+	{
+		.procname	= "mem_pcpu_rsv",
+		.data		= &sysctl_mem_pcpu_rsv,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &min_mem_pcpu_rsv,
+	},
 	{
 		.procname	= "dev_weight",
 		.data		= &weight_p,
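
With the table entry above in place, the file behaves like any other integer
sysctl under /proc/sys/net/core/. A small illustrative userspace snippet
(error handling kept minimal; the 4096 figure is just the 16MB example from
the changelog and assumes 4KB pages):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/sys/net/core/mem_pcpu_rsv";
        FILE *f;
        int pages;

        /* Read the current per-cpu reserve, in page units. */
        f = fopen(path, "r");
        if (!f)
            return 1;
        if (fscanf(f, "%d", &pages) == 1)
            printf("mem_pcpu_rsv = %d pages\n", pages);
        fclose(f);

        /* Raise it to 4096 pages (16 MB with 4 KB pages); requires root. */
        f = fopen(path, "w");
        if (!f)
            return 1;
        fprintf(f, "4096\n");
        fclose(f);
        return 0;
    }

Note that writes below the 1MB default are rejected by proc_dointvec_minmax
through the min_mem_pcpu_rsv lower bound wired up via .extra1 above.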