Commit 7bc1aec5 authored by Liam Mark's avatar Liam Mark Committed by Linus Torvalds

mm: cma: add trace events for CMA alloc perf testing

Add cma and migrate trace events to enable CMA allocation performance to
be measured via ftrace.

[georgi.djakov@linaro.org: add the CMA instance name to the cma_alloc_start trace event]
  Link: https://lkml.kernel.org/r/20210326155414.25006-1-georgi.djakov@linaro.org

Link: https://lkml.kernel.org/r/20210324160740.15901-1-georgi.djakov@linaro.org
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 63f83b31
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
TRACE_EVENT(cma_alloc, DECLARE_EVENT_CLASS(cma_alloc_class,
TP_PROTO(unsigned long pfn, const struct page *page, TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align), unsigned int count, unsigned int align),
...@@ -61,6 +61,46 @@ TRACE_EVENT(cma_release, ...@@ -61,6 +61,46 @@ TRACE_EVENT(cma_release,
__entry->count) __entry->count)
); );
/*
 * cma_alloc_start - emitted at the top of cma_alloc(), before any bitmap
 * scanning or page migration work begins, so the total allocation latency
 * can be measured via ftrace by pairing it with the completion event.
 *
 * name:  CMA heap/instance name (cma->name at the call site) — recorded
 *        as a trace string so per-instance allocations can be told apart.
 * count: number of pages requested.
 * align: requested alignment (as an order at the cma_alloc() call site).
 */
TRACE_EVENT(cma_alloc_start,
TP_PROTO(const char *name, unsigned int count, unsigned int align),
TP_ARGS(name, count, align),
TP_STRUCT__entry(
__string(name, name)
__field(unsigned int, count)
__field(unsigned int, align)
),
TP_fast_assign(
/* __assign_str copies the name into the ring buffer's __string slot */
__assign_str(name, name);
__entry->count = count;
__entry->align = align;
),
TP_printk("name=%s count=%u align=%u",
__get_str(name),
__entry->count,
__entry->align)
);
/*
 * cma_alloc - instance of cma_alloc_class (converted from a standalone
 * TRACE_EVENT in this patch so the busy-retry event below can share the
 * same entry layout and printk format).
 */
DEFINE_EVENT(cma_alloc_class, cma_alloc,
TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
TP_ARGS(pfn, page, count, align)
);
/*
 * cma_alloc_busy_retry - emitted from cma_alloc() when alloc_contig_range()
 * found the candidate range busy (-EBUSY path) and the allocator is about
 * to retry at a different offset; counts retries for perf analysis.
 */
DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
TP_ARGS(pfn, page, count, align)
);
#endif /* _TRACE_CMA_H */ #endif /* _TRACE_CMA_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
...@@ -81,6 +81,28 @@ TRACE_EVENT(mm_migrate_pages, ...@@ -81,6 +81,28 @@ TRACE_EVENT(mm_migrate_pages,
__print_symbolic(__entry->mode, MIGRATE_MODE), __print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON)) __print_symbolic(__entry->reason, MIGRATE_REASON))
); );
/*
 * mm_migrate_pages_start - emitted at the top of migrate_pages(), before
 * the migration loop runs; pairing it with mm_migrate_pages (above, which
 * fires on completion) gives the migration latency for a CMA allocation.
 *
 * mode:   migration mode (printed symbolically via MIGRATE_MODE).
 * reason: why the migration was requested (printed via MIGRATE_REASON).
 */
TRACE_EVENT(mm_migrate_pages_start,
TP_PROTO(enum migrate_mode mode, int reason),
TP_ARGS(mode, reason),
TP_STRUCT__entry(
__field(enum migrate_mode, mode)
__field(int, reason)
),
TP_fast_assign(
__entry->mode = mode;
__entry->reason = reason;
),
TP_printk("mode=%s reason=%s",
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
#endif /* _TRACE_MIGRATE_H */ #endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */ /* This part must be outside protection */
......
...@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, ...@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
if (!count) if (!count)
goto out; goto out;
trace_cma_alloc_start(cma->name, count, align);
mask = cma_bitmap_aligned_mask(cma, align); mask = cma_bitmap_aligned_mask(cma, align);
offset = cma_bitmap_aligned_offset(cma, align); offset = cma_bitmap_aligned_offset(cma, align);
bitmap_maxno = cma_bitmap_maxno(cma); bitmap_maxno = cma_bitmap_maxno(cma);
...@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align, ...@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
pr_debug("%s(): memory range at %p is busy, retrying\n", pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn)); __func__, pfn_to_page(pfn));
trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
/* try again with a bit different memory target */ /* try again with a bit different memory target */
start = bitmap_no + mask + 1; start = bitmap_no + mask + 1;
} }
......
...@@ -1418,6 +1418,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, ...@@ -1418,6 +1418,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
int rc, nr_subpages; int rc, nr_subpages;
LIST_HEAD(ret_pages); LIST_HEAD(ret_pages);
trace_mm_migrate_pages_start(mode, reason);
if (!swapwrite) if (!swapwrite)
current->flags |= PF_SWAPWRITE; current->flags |= PF_SWAPWRITE;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment