Commit d6f19938 authored by Yafang Shao, committed by David S. Miller

net: expose sk wmem in sock_exceed_buf_limit tracepoint

Currently trace_sock_exceed_buf_limit() only shows rmem info,
but the wmem limit may also be hit.
So expose wmem info in this tracepoint as well.

Regarding memcg, I think it is better to introduce a new tracepoint (if
one is needed), e.g. trace_memcg_limit_hit, rather than showing memcg info
in trace_sock_exceed_buf_limit.
Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 335c997d
...@@ -35,6 +35,10 @@ ...@@ -35,6 +35,10 @@
EM(TCP_CLOSING) \ EM(TCP_CLOSING) \
EMe(TCP_NEW_SYN_RECV) EMe(TCP_NEW_SYN_RECV)
#define skmem_kind_names \
EM(SK_MEM_SEND) \
EMe(SK_MEM_RECV)
/* enums need to be exported to user space */ /* enums need to be exported to user space */
#undef EM #undef EM
#undef EMe #undef EMe
...@@ -44,6 +48,7 @@ ...@@ -44,6 +48,7 @@
family_names family_names
inet_protocol_names inet_protocol_names
tcp_state_names tcp_state_names
skmem_kind_names
#undef EM #undef EM
#undef EMe #undef EMe
...@@ -59,6 +64,9 @@ tcp_state_names ...@@ -59,6 +64,9 @@ tcp_state_names
#define show_tcp_state_name(val) \ #define show_tcp_state_name(val) \
__print_symbolic(val, tcp_state_names) __print_symbolic(val, tcp_state_names)
#define show_skmem_kind_names(val) \
__print_symbolic(val, skmem_kind_names)
TRACE_EVENT(sock_rcvqueue_full, TRACE_EVENT(sock_rcvqueue_full,
TP_PROTO(struct sock *sk, struct sk_buff *skb), TP_PROTO(struct sock *sk, struct sk_buff *skb),
...@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full, ...@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
TRACE_EVENT(sock_exceed_buf_limit, TRACE_EVENT(sock_exceed_buf_limit,
TP_PROTO(struct sock *sk, struct proto *prot, long allocated), TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
TP_ARGS(sk, prot, allocated), TP_ARGS(sk, prot, allocated, kind),
TP_STRUCT__entry( TP_STRUCT__entry(
__array(char, name, 32) __array(char, name, 32)
...@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit, ...@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
__field(long, allocated) __field(long, allocated)
__field(int, sysctl_rmem) __field(int, sysctl_rmem)
__field(int, rmem_alloc) __field(int, rmem_alloc)
__field(int, sysctl_wmem)
__field(int, wmem_alloc)
__field(int, wmem_queued)
__field(int, kind)
), ),
TP_fast_assign( TP_fast_assign(
...@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit, ...@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->allocated = allocated; __entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot); __entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc); __entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
__entry->sysctl_wmem = sk_get_wmem0(sk, prot);
__entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
__entry->wmem_queued = sk->sk_wmem_queued;
__entry->kind = kind;
), ),
TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld " TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
"sysctl_rmem=%d rmem_alloc=%d",
__entry->name, __entry->name,
__entry->sysctl_mem[0], __entry->sysctl_mem[0],
__entry->sysctl_mem[1], __entry->sysctl_mem[1],
__entry->sysctl_mem[2], __entry->sysctl_mem[2],
__entry->allocated, __entry->allocated,
__entry->sysctl_rmem, __entry->sysctl_rmem,
__entry->rmem_alloc) __entry->rmem_alloc,
__entry->sysctl_wmem,
__entry->wmem_alloc,
__entry->wmem_queued,
show_skmem_kind_names(__entry->kind)
)
); );
TRACE_EVENT(inet_sock_set_state, TRACE_EVENT(inet_sock_set_state,
......
...@@ -2401,9 +2401,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) ...@@ -2401,9 +2401,10 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
{ {
struct proto *prot = sk->sk_prot; struct proto *prot = sk->sk_prot;
long allocated = sk_memory_allocated_add(sk, amt); long allocated = sk_memory_allocated_add(sk, amt);
bool charged = true;
if (mem_cgroup_sockets_enabled && sk->sk_memcg && if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
!mem_cgroup_charge_skmem(sk->sk_memcg, amt)) !(charged = mem_cgroup_charge_skmem(sk->sk_memcg, amt)))
goto suppress_allocation; goto suppress_allocation;
/* Under limit. */ /* Under limit. */
...@@ -2461,7 +2462,8 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind) ...@@ -2461,7 +2462,8 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
return 1; return 1;
} }
trace_sock_exceed_buf_limit(sk, prot, allocated); if (kind == SK_MEM_SEND || (kind == SK_MEM_RECV && charged))
trace_sock_exceed_buf_limit(sk, prot, allocated, kind);
sk_memory_allocated_sub(sk, amt); sk_memory_allocated_sub(sk, amt);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment