Commit f033b688 authored by Jesper Dangaard Brouer, committed by David S. Miller

xdp: add tracepoints for XDP mem

These tracepoints make it easier to troubleshoot XDP mem id disconnect.

The xdp:mem_disconnect tracepoint cannot be replaced via kprobe. It is
placed at the last stable place for the pointer to struct xdp_mem_allocator,
just before it's scheduled for RCU removal. It also extracts info on
'safe_to_remove' and 'force'.

Detailed info about in-flight pages is not available at this layer. The next
patch will add the tracepoints needed at the page_pool layer for this.
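
As a usage sketch (not part of this patch): once these tracepoints land, they
can be enabled through tracefs and read from trace_pipe without extra tooling.
A minimal C reader, assuming tracefs is mounted at /sys/kernel/tracing and the
program runs as root:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Enable the xdp:mem_disconnect tracepoint via tracefs and stream the
 * formatted events. Paths assume tracefs at /sys/kernel/tracing (older
 * setups expose the same tree under /sys/kernel/debug/tracing).
 */
int main(void)
{
	const char *enable =
		"/sys/kernel/tracing/events/xdp/mem_disconnect/enable";
	char buf[4096];
	ssize_t n;
	int fd, pipe_fd;

	fd = open(enable, O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable xdp:mem_disconnect");
		return 1;
	}
	close(fd);

	pipe_fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
	if (pipe_fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	/* read() blocks until events arrive; interrupt with Ctrl-C */
	while ((n = read(pipe_fd, buf, sizeof(buf))) > 0) {
		if (write(STDOUT_FILENO, buf, n) != n)
			break;
	}
	close(pipe_fd);
	return 0;
}

The same events are also reachable ad hoc with perf, e.g.
perf record -e xdp:mem_disconnect -a followed by perf script.
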
Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent d956a048
include/net/xdp_priv.h (new file)
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_NET_XDP_PRIV_H__
#define __LINUX_NET_XDP_PRIV_H__
#include <linux/rhashtable.h>
/* Private to net/core/xdp.c, but used by trace/events/xdp.h */
struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	int disconnect_cnt;
	unsigned long defer_start;
	struct rhash_head node;
	struct rcu_head rcu;
	struct delayed_work defer_wq;
	unsigned long defer_warn;
};
#endif /* __LINUX_NET_XDP_PRIV_H__ */
include/trace/events/xdp.h
@@ -269,6 +269,121 @@ TRACE_EVENT(xdp_devmap_xmit,
		  __entry->from_ifindex, __entry->to_ifindex, __entry->err)
);
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
#include <net/xdp_priv.h>
#define __MEM_TYPE_MAP(FN)	\
	FN(PAGE_SHARED)		\
	FN(PAGE_ORDER0)		\
	FN(PAGE_POOL)		\
	FN(ZERO_COPY)

#define __MEM_TYPE_TP_FN(x)	\
	TRACE_DEFINE_ENUM(MEM_TYPE_##x);
#define __MEM_TYPE_SYM_FN(x)	\
	{ MEM_TYPE_##x, #x },
#define __MEM_TYPE_SYM_TAB	\
	__MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
__MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
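
To make the indirection easier to follow, here is roughly what the helpers
above expand to (a sketch of the preprocessor output, not literal patch
content):

/* __MEM_TYPE_MAP(__MEM_TYPE_TP_FN) emits one TRACE_DEFINE_ENUM() per type,
 * exporting the enum values so user space can resolve them:
 */
TRACE_DEFINE_ENUM(MEM_TYPE_PAGE_SHARED);
TRACE_DEFINE_ENUM(MEM_TYPE_PAGE_ORDER0);
TRACE_DEFINE_ENUM(MEM_TYPE_PAGE_POOL);
TRACE_DEFINE_ENUM(MEM_TYPE_ZERO_COPY);

/* __MEM_TYPE_SYM_TAB becomes the value/name table __print_symbolic() uses: */
{ MEM_TYPE_PAGE_SHARED, "PAGE_SHARED" },
{ MEM_TYPE_PAGE_ORDER0, "PAGE_ORDER0" },
{ MEM_TYPE_PAGE_POOL, "PAGE_POOL" },
{ MEM_TYPE_ZERO_COPY, "ZERO_COPY" },
{ -1, 0 }
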
TRACE_EVENT(mem_disconnect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 bool safe_to_remove, bool force),

	TP_ARGS(xa, safe_to_remove, force),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
		__field(bool, safe_to_remove)
		__field(bool, force)
		__field(int, disconnect_cnt)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->safe_to_remove = safe_to_remove;
		__entry->force = force;
		__entry->disconnect_cnt = xa->disconnect_cnt;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " safe_to_remove=%s force=%s disconnect_cnt=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->safe_to_remove ? "true" : "false",
		  __entry->force ? "true" : "false",
		  __entry->disconnect_cnt
	)
);
TRACE_EVENT(mem_connect,

	TP_PROTO(const struct xdp_mem_allocator *xa,
		 const struct xdp_rxq_info *rxq),

	TP_ARGS(xa, rxq),

	TP_STRUCT__entry(
		__field(const struct xdp_mem_allocator *, xa)
		__field(u32, mem_id)
		__field(u32, mem_type)
		__field(const void *, allocator)
		__field(const struct xdp_rxq_info *, rxq)
		__field(int, ifindex)
	),

	TP_fast_assign(
		__entry->xa = xa;
		__entry->mem_id = xa->mem.id;
		__entry->mem_type = xa->mem.type;
		__entry->allocator = xa->allocator;
		__entry->rxq = rxq;
		__entry->ifindex = rxq->dev->ifindex;
	),

	TP_printk("mem_id=%d mem_type=%s allocator=%p"
		  " ifindex=%d",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->allocator,
		  __entry->ifindex
	)
);
TRACE_EVENT(mem_return_failed,

	TP_PROTO(const struct xdp_mem_info *mem,
		 const struct page *page),

	TP_ARGS(mem, page),

	TP_STRUCT__entry(
		__field(const struct page *, page)
		__field(u32, mem_id)
		__field(u32, mem_type)
	),

	TP_fast_assign(
		__entry->page = page;
		__entry->mem_id = mem->id;
		__entry->mem_type = mem->type;
	),

	TP_printk("mem_id=%d mem_type=%s page=%p",
		  __entry->mem_id,
		  __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
		  __entry->page
	)
);
#endif /* _TRACE_XDP_H */

#include <trace/define_trace.h>
net/core/xdp.c
@@ -14,6 +14,8 @@
#include <net/page_pool.h>
#include <net/xdp.h>
#include <net/xdp_priv.h> /* struct xdp_mem_allocator */
#include <trace/events/xdp.h>
#define REG_STATE_NEW		0x0
#define REG_STATE_REGISTERED	0x1
@@ -29,21 +31,6 @@ static int mem_id_next = MEM_ID_MIN;
static bool mem_id_init; /* false */
static struct rhashtable *mem_id_ht;
struct xdp_mem_allocator {
	struct xdp_mem_info mem;
	union {
		void *allocator;
		struct page_pool *page_pool;
		struct zero_copy_allocator *zc_alloc;
	};
	struct rhash_head node;
	struct rcu_head rcu;
	struct delayed_work defer_wq;
	unsigned long defer_start;
	unsigned long defer_warn;
	int disconnect_cnt;
};
static u32 xdp_mem_id_hashfn(const void *data, u32 len, u32 seed)
{
	const u32 *k = data;
@@ -117,7 +104,7 @@ bool __mem_id_disconnect(int id, bool force)
	if (xa->mem.type == MEM_TYPE_PAGE_POOL)
		safe_to_remove = page_pool_request_shutdown(xa->page_pool);

	/* TODO: Tracepoint will be added here in next-patch */
	trace_mem_disconnect(xa, safe_to_remove, force);

	if ((safe_to_remove || force) &&
	    !rhashtable_remove_fast(mem_id_ht, &xa->node, mem_id_rht_params))
@@ -385,6 +372,7 @@ int xdp_rxq_info_reg_mem_model(struct xdp_rxq_info *xdp_rxq,
	mutex_unlock(&mem_id_lock);

	trace_mem_connect(xdp_alloc, xdp_rxq);
	return 0;
err:
	mutex_unlock(&mem_id_lock);
@@ -417,6 +405,7 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
	} else {
		/* Hopefully stack show who to blame for late return */
		WARN_ONCE(1, "page_pool gone mem.id=%d", mem->id);
		trace_mem_return_failed(mem, page);
		put_page(page);
	}
	rcu_read_unlock();
...
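
For orientation: an xdp:mem_disconnect event rendered through its TP_printk()
format above comes out along these lines (field values here are illustrative,
not captured output):

	mem_id=42 mem_type=PAGE_POOL allocator=0xffff888106dc8000 safe_to_remove=false force=false disconnect_cnt=1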