mm: cma: add trace events for CMA alloc perf testing

Add cma and migrate trace events to enable CMA allocation performance to
be measured via ftrace.

[georgi.djakov@linaro.org: add the CMA instance name to the cma_alloc_start trace event]
  Link: https://lkml.kernel.org/r/20210326155414.25006-1-georgi.djakov@linaro.org

Link: https://lkml.kernel.org/r/20210324160740.15901-1-georgi.djakov@linaro.org
Signed-off-by: Liam Mark <lmark@codeaurora.org>
Signed-off-by: Georgi Djakov <georgi.djakov@linaro.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
Liam Mark 2021-05-04 18:37:25 -07:00 committed by Linus Torvalds
parent 63f83b31f4
commit 7bc1aec5e2
4 changed files with 69 additions and 1 deletions

View File

@@ -8,7 +8,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
TRACE_EVENT(cma_alloc,
DECLARE_EVENT_CLASS(cma_alloc_class,
TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
@@ -61,6 +61,46 @@ TRACE_EVENT(cma_release,
__entry->count)
);
/*
 * cma_alloc_start - emitted at the start of cma_alloc() so that the
 * latency of a CMA allocation can be measured via ftrace by pairing
 * this event with the completion event for the same region.
 *
 * name:  CMA region (instance) name; stored as a string copy in the
 *        ring buffer since the event outlives the caller's pointer
 * count: number of pages requested
 * align: requested alignment (page order)
 */
TRACE_EVENT(cma_alloc_start,
TP_PROTO(const char *name, unsigned int count, unsigned int align),
TP_ARGS(name, count, align),
TP_STRUCT__entry(
__string(name, name)
__field(unsigned int, count)
__field(unsigned int, align)
),
TP_fast_assign(
__assign_str(name, name);
__entry->count = count;
__entry->align = align;
),
TP_printk("name=%s count=%u align=%u",
__get_str(name),
__entry->count,
__entry->align)
);
/*
 * cma_alloc - successful CMA allocation; reuses the cma_alloc_class
 * layout (pfn, page, count, align) shared with cma_alloc_busy_retry.
 */
DEFINE_EVENT(cma_alloc_class, cma_alloc,
TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
TP_ARGS(pfn, page, count, align)
);
/*
 * cma_alloc_busy_retry - emitted when a candidate CMA range was busy
 * and the allocator retries at a different offset; same field layout
 * as cma_alloc via the shared cma_alloc_class.
 */
DEFINE_EVENT(cma_alloc_class, cma_alloc_busy_retry,
TP_PROTO(unsigned long pfn, const struct page *page,
unsigned int count, unsigned int align),
TP_ARGS(pfn, page, count, align)
);
#endif /* _TRACE_CMA_H */
/* This part must be outside protection */

View File

@@ -81,6 +81,28 @@ TRACE_EVENT(mm_migrate_pages,
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
/*
 * mm_migrate_pages_start - emitted at the start of migrate_pages() so
 * migration latency can be measured against the existing
 * mm_migrate_pages completion event.
 *
 * mode:   migration mode (printed symbolically via MIGRATE_MODE)
 * reason: why the migration was requested (printed via MIGRATE_REASON)
 */
TRACE_EVENT(mm_migrate_pages_start,
TP_PROTO(enum migrate_mode mode, int reason),
TP_ARGS(mode, reason),
TP_STRUCT__entry(
__field(enum migrate_mode, mode)
__field(int, reason)
),
TP_fast_assign(
__entry->mode = mode;
__entry->reason = reason;
),
TP_printk("mode=%s reason=%s",
__print_symbolic(__entry->mode, MIGRATE_MODE),
__print_symbolic(__entry->reason, MIGRATE_REASON))
);
#endif /* _TRACE_MIGRATE_H */
/* This part must be outside protection */

View File

@@ -443,6 +443,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
if (!count)
goto out;
trace_cma_alloc_start(cma->name, count, align);
mask = cma_bitmap_aligned_mask(cma, align);
offset = cma_bitmap_aligned_offset(cma, align);
bitmap_maxno = cma_bitmap_maxno(cma);
@@ -483,6 +485,8 @@ struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
pr_debug("%s(): memory range at %p is busy, retrying\n",
__func__, pfn_to_page(pfn));
trace_cma_alloc_busy_retry(pfn, pfn_to_page(pfn), count, align);
/* try again with a bit different memory target */
start = bitmap_no + mask + 1;
}

View File

@@ -1418,6 +1418,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
int rc, nr_subpages;
LIST_HEAD(ret_pages);
trace_mm_migrate_pages_start(mode, reason);
if (!swapwrite)
current->flags |= PF_SWAPWRITE;