commit 0bf02a0d80
Sometimes I can see that 'perf record' piped with 'perf inject' takes a
long time processing build-ids.

So introduce an inject-build-id benchmark to the internals benchmark
suite to measure its overhead regularly.  It runs the 'perf inject'
command internally and feeds the given number of synthesized events
(MMAP2 + SAMPLE basically).

  Usage: perf bench internals inject-build-id <options>

    -i, --iterations <n>   Number of iterations used to compute average (default: 100)
    -m, --nr-mmaps <n>     Number of mmap events for each iteration (default: 100)
    -n, --nr-samples <n>   Number of sample events per mmap event (default: 100)
    -v, --verbose          be more verbose (show iteration count, DSO name, etc)

By default, it measures average processing time of 100 MMAP2 events
and 10000 SAMPLE events.  Below is a result on my laptop.

  $ perf bench internals inject-build-id
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 25.789 msec (+- 0.202 msec)
    Average time per event: 2.528 usec (+- 0.020 usec)
    Average memory usage: 8411 KB (+- 7 KB)

Committer testing:

  $ perf bench
  Usage:
          perf bench [<common options>] <collection> <benchmark> [<options>]

          # List of all available benchmark collections:

           sched: Scheduler and IPC benchmarks
         syscall: System call benchmarks
             mem: Memory access benchmarks
            numa: NUMA scheduling and MM benchmarks
           futex: Futex stressing benchmarks
           epoll: Epoll stressing benchmarks
       internals: Perf-internals benchmarks
             all: All benchmarks

  $ perf bench internals

          # List of available benchmarks for collection 'internals':

        synthesize: Benchmark perf event synthesis
    kallsyms-parse: Benchmark kallsyms parsing
   inject-build-id: Benchmark build-id injection

  $ perf bench internals inject-build-id
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.202 msec (+- 0.059 msec)
    Average time per event: 1.392 usec (+- 0.006 usec)
    Average memory usage: 12650 KB (+- 10 KB)
    Average build-id-all injection took: 12.831 msec (+- 0.071 msec)
    Average time per event: 1.258 usec (+- 0.007 usec)
    Average memory usage: 11895 KB (+- 10 KB)
  $

  $ perf stat -r5 perf bench internals inject-build-id
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.380 msec (+- 0.056 msec)
    Average time per event: 1.410 usec (+- 0.006 usec)
    Average memory usage: 12608 KB (+- 11 KB)
    Average build-id-all injection took: 11.889 msec (+- 0.064 msec)
    Average time per event: 1.166 usec (+- 0.006 usec)
    Average memory usage: 11838 KB (+- 10 KB)
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.246 msec (+- 0.065 msec)
    Average time per event: 1.397 usec (+- 0.006 usec)
    Average memory usage: 12744 KB (+- 10 KB)
    Average build-id-all injection took: 12.019 msec (+- 0.066 msec)
    Average time per event: 1.178 usec (+- 0.006 usec)
    Average memory usage: 11963 KB (+- 10 KB)
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.321 msec (+- 0.067 msec)
    Average time per event: 1.404 usec (+- 0.007 usec)
    Average memory usage: 12690 KB (+- 10 KB)
    Average build-id-all injection took: 11.909 msec (+- 0.041 msec)
    Average time per event: 1.168 usec (+- 0.004 usec)
    Average memory usage: 11938 KB (+- 10 KB)
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.287 msec (+- 0.059 msec)
    Average time per event: 1.401 usec (+- 0.006 usec)
    Average memory usage: 12864 KB (+- 10 KB)
    Average build-id-all injection took: 11.862 msec (+- 0.058 msec)
    Average time per event: 1.163 usec (+- 0.006 usec)
    Average memory usage: 12103 KB (+- 10 KB)
  # Running 'internals/inject-build-id' benchmark:
    Average build-id injection took: 14.402 msec (+- 0.053 msec)
    Average time per event: 1.412 usec (+- 0.005 usec)
    Average memory usage: 12876 KB (+- 10 KB)
    Average build-id-all injection took: 11.826 msec (+- 0.061 msec)
    Average time per event: 1.159 usec (+- 0.006 usec)
    Average memory usage: 12111 KB (+- 10 KB)

   Performance counter stats for 'perf bench internals inject-build-id' (5 runs):

            4,267.48 msec task-clock:u              #    1.502 CPUs utilized            ( +-  0.14% )
                   0      context-switches:u        #    0.000 K/sec
                   0      cpu-migrations:u          #    0.000 K/sec
             102,092      page-faults:u             #    0.024 M/sec                    ( +-  0.08% )
       3,894,589,578      cycles:u                  #    0.913 GHz                      ( +-  0.19% )  (83.49%)
         140,078,421      stalled-cycles-frontend:u #    3.60% frontend cycles idle     ( +-  0.77% )  (83.34%)
         948,581,189      stalled-cycles-backend:u  #   24.36% backend cycles idle      ( +-  0.46% )  (83.25%)
       5,835,587,719      instructions:u            #    1.50  insn per cycle
                                                    #    0.16  stalled cycles per insn  ( +-  0.21% )  (83.24%)
       1,267,423,636      branches:u                #  296.996 M/sec                    ( +-  0.22% )  (83.12%)
          17,484,290      branch-misses:u           #    1.38% of all branches          ( +-  0.12% )  (83.55%)

             2.84176 +- 0.00222 seconds time elapsed  ( +-  0.08% )

  $

Acked-by: Jiri Olsa <jolsa@redhat.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Link: https://lore.kernel.org/r/20201012070214.2074921-2-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
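The plumbing this commit needs in 'perf bench' itself is small. Below is a
sketch of the two touch points, not a verbatim diff hunk: the entry-point
name matches the internals_benchmarks[] table in the file below, and the
declaration follows the style the other internals benchmarks use in
bench/bench.h:

  /* tools/perf/bench/bench.h: declare the new main()-style entry point */
  int bench_inject_build_id(int argc, const char **argv);

  /* tools/perf/builtin-bench.c: register it under the 'internals' collection */
  { "inject-build-id", "Benchmark build-id injection", bench_inject_build_id },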
323 lines
8.1 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-bench.c
 *
 * General benchmarking collections provided by perf
 *
 * Copyright (C) 2009, Hitoshi Mitake <mitake@dcl.info.waseda.ac.jp>
 */

/*
 * Available benchmark collection list:
 *
 *  sched ... scheduler and IPC performance
 *  syscall ... System call performance
 *  mem ... memory access performance
 *  numa ... NUMA scheduling and MM performance
 *  futex ... Futex performance
 *  epoll ... Event poll performance
 *  internals ... perf-internals performance
 */
#include <subcmd/parse-options.h>
#include "builtin.h"
#include "bench/bench.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/prctl.h>
#include <linux/zalloc.h>
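/*
 * Each benchmark implements this main()-style signature: it receives the
 * remaining command-line words as argc/argv and returns an exit code.
 */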
typedef int (*bench_fn_t)(int argc, const char **argv);

struct bench {
        const char *name;
        const char *summary;
        bench_fn_t fn;
};

#ifdef HAVE_LIBNUMA_SUPPORT
static struct bench numa_benchmarks[] = {
        { "mem",        "Benchmark for NUMA workloads",         bench_numa },
        { "all",        "Run all NUMA benchmarks",              NULL },
        { NULL,         NULL,                                   NULL }
};
#endif

static struct bench sched_benchmarks[] = {
        { "messaging",  "Benchmark for scheduling and IPC",             bench_sched_messaging },
        { "pipe",       "Benchmark for pipe() between two processes",   bench_sched_pipe },
        { "all",        "Run all scheduler benchmarks",                 NULL },
        { NULL,         NULL,                                           NULL }
};

static struct bench syscall_benchmarks[] = {
        { "basic",      "Benchmark for basic getppid(2) calls",         bench_syscall_basic },
        { "all",        "Run all syscall benchmarks",                   NULL },
        { NULL,         NULL,                                           NULL },
};

static struct bench mem_benchmarks[] = {
        { "memcpy",     "Benchmark for memcpy() functions",             bench_mem_memcpy },
        { "memset",     "Benchmark for memset() functions",             bench_mem_memset },
        { "find_bit",   "Benchmark for find_bit() functions",           bench_mem_find_bit },
        { "all",        "Run all memory access benchmarks",             NULL },
        { NULL,         NULL,                                           NULL }
};

static struct bench futex_benchmarks[] = {
        { "hash",       "Benchmark for futex hash table",               bench_futex_hash },
        { "wake",       "Benchmark for futex wake calls",               bench_futex_wake },
        { "wake-parallel", "Benchmark for parallel futex wake calls",   bench_futex_wake_parallel },
        { "requeue",    "Benchmark for futex requeue calls",            bench_futex_requeue },
        /* pi-futexes */
        { "lock-pi",    "Benchmark for futex lock_pi calls",            bench_futex_lock_pi },
        { "all",        "Run all futex benchmarks",                     NULL },
        { NULL,         NULL,                                           NULL }
};

#ifdef HAVE_EVENTFD_SUPPORT
static struct bench epoll_benchmarks[] = {
        { "wait",       "Benchmark epoll concurrent epoll_waits",       bench_epoll_wait },
        { "ctl",        "Benchmark epoll concurrent epoll_ctls",        bench_epoll_ctl },
        { "all",        "Run all epoll benchmarks",                     NULL },
        { NULL,         NULL,                                           NULL }
};
#endif // HAVE_EVENTFD_SUPPORT

static struct bench internals_benchmarks[] = {
        { "synthesize", "Benchmark perf event synthesis",               bench_synthesize },
        { "kallsyms-parse", "Benchmark kallsyms parsing",               bench_kallsyms_parse },
        { "inject-build-id", "Benchmark build-id injection",            bench_inject_build_id },
        { NULL,         NULL,                                           NULL }
};
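/*
 * A collection groups related benchmarks under one sub-command name;
 * cmd_bench() resolves "perf bench <collection> <benchmark>" against
 * the collections[] table below.
 */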
struct collection {
        const char *name;
        const char *summary;
        struct bench *benchmarks;
};

static struct collection collections[] = {
        { "sched",      "Scheduler and IPC benchmarks",         sched_benchmarks },
        { "syscall",    "System call benchmarks",               syscall_benchmarks },
        { "mem",        "Memory access benchmarks",             mem_benchmarks },
#ifdef HAVE_LIBNUMA_SUPPORT
        { "numa",       "NUMA scheduling and MM benchmarks",    numa_benchmarks },
#endif
        { "futex",      "Futex stressing benchmarks",           futex_benchmarks },
#ifdef HAVE_EVENTFD_SUPPORT
        { "epoll",      "Epoll stressing benchmarks",           epoll_benchmarks },
#endif
        { "internals",  "Perf-internals benchmarks",            internals_benchmarks },
        { "all",        "All benchmarks",                       NULL },
        { NULL,         NULL,                                   NULL }
};

/* Iterate over all benchmark collections: */
#define for_each_collection(coll) \
        for (coll = collections; coll->name; coll++)

/* Iterate over all benchmarks within a collection: */
#define for_each_bench(coll, bench) \
        for (bench = coll->benchmarks; bench && bench->name; bench++)

static void dump_benchmarks(struct collection *coll)
{
        struct bench *bench;

        printf("\n # List of available benchmarks for collection '%s':\n\n", coll->name);

        for_each_bench(coll, bench)
                printf("%14s: %s\n", bench->name, bench->summary);

        printf("\n");
}

static const char *bench_format_str;

/* Output/formatting style, exported to benchmark modules: */
int bench_format = BENCH_FORMAT_DEFAULT;
unsigned int bench_repeat = 10; /* default number of times to repeat the run */

static const struct option bench_options[] = {
        OPT_STRING('f', "format", &bench_format_str, "default|simple", "Specify the output formatting style"),
        OPT_UINTEGER('r', "repeat", &bench_repeat, "Specify amount of times to repeat the run"),
        OPT_END()
};

static const char * const bench_usage[] = {
        "perf bench [<common options>] <collection> <benchmark> [<options>]",
        NULL
};

static void print_usage(void)
{
        struct collection *coll;
        int i;

        printf("Usage: \n");
        for (i = 0; bench_usage[i]; i++)
                printf("\t%s\n", bench_usage[i]);
        printf("\n");

        printf(" # List of all available benchmark collections:\n\n");

        for_each_collection(coll)
                printf("%14s: %s\n", coll->name, coll->summary);
        printf("\n");
}

static int bench_str2int(const char *str)
{
        if (!str)
                return BENCH_FORMAT_DEFAULT;

        if (!strcmp(str, BENCH_FORMAT_DEFAULT_STR))
                return BENCH_FORMAT_DEFAULT;
        else if (!strcmp(str, BENCH_FORMAT_SIMPLE_STR))
                return BENCH_FORMAT_SIMPLE;

        return BENCH_FORMAT_UNKNOWN;
}

/*
 * Run a specific benchmark but first rename the running task's ->comm[]
 * to something meaningful:
 */
static int run_bench(const char *coll_name, const char *bench_name, bench_fn_t fn,
                     int argc, const char **argv)
{
        int size;
        char *name;
        int ret;

        size = strlen(coll_name) + 1 + strlen(bench_name) + 1;

        name = zalloc(size);
        BUG_ON(!name);

        scnprintf(name, size, "%s-%s", coll_name, bench_name);
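        /*
         * PR_SET_NAME renames the calling thread's comm[] to the
         * "<collection>-<benchmark>" string built above (the kernel
         * truncates it to its 16-byte comm limit), so the run is
         * identifiable in ps/top and in recorded samples.
         */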
        prctl(PR_SET_NAME, name);
        argv[0] = name;

        ret = fn(argc, argv);

        free(name);

        return ret;
}
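/*
 * Run every benchmark in a collection with default arguments.  The "all"
 * and terminating table entries have a NULL fn, so the loop stops at the
 * first entry without a real benchmark function.
 */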
static void run_collection(struct collection *coll)
{
        struct bench *bench;
        const char *argv[2];

        argv[1] = NULL;
        /*
         * TODO:
         *
         * Preparing preset parameters for
         * embedded, ordinary PC, HPC, etc...
         * would be helpful.
         */
        for_each_bench(coll, bench) {
                if (!bench->fn)
                        break;
                printf("# Running %s/%s benchmark...\n", coll->name, bench->name);
                fflush(stdout);

                argv[1] = bench->name;
                run_bench(coll->name, bench->name, bench->fn, 1, argv);
                printf("\n");
        }
}

static void run_all_collections(void)
{
        struct collection *coll;

        for_each_collection(coll)
                run_collection(coll);
}

int cmd_bench(int argc, const char **argv)
{
        struct collection *coll;
        int ret = 0;

        if (argc < 2) {
                /* No collection specified. */
                print_usage();
                goto end;
        }
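        /*
         * Stop option parsing at the first non-option so that everything
         * after <collection> <benchmark> is passed through to the
         * benchmark's own option parser (note the argc - 1 / argv + 1
         * handed to run_bench() below).
         */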
        argc = parse_options(argc, argv, bench_options, bench_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);

        bench_format = bench_str2int(bench_format_str);
        if (bench_format == BENCH_FORMAT_UNKNOWN) {
                printf("Unknown format descriptor: '%s'\n", bench_format_str);
                goto end;
        }

        if (bench_repeat == 0) {
                printf("Invalid repeat option: Must specify a positive value\n");
                goto end;
        }

        if (argc < 1) {
                print_usage();
                goto end;
        }

        if (!strcmp(argv[0], "all")) {
                run_all_collections();
                goto end;
        }

        for_each_collection(coll) {
                struct bench *bench;

                if (strcmp(coll->name, argv[0]))
                        continue;

                if (argc < 2) {
                        /* No bench specified. */
                        dump_benchmarks(coll);
                        goto end;
                }

                if (!strcmp(argv[1], "all")) {
                        run_collection(coll);
                        goto end;
                }

                for_each_bench(coll, bench) {
                        if (strcmp(bench->name, argv[1]))
                                continue;

                        if (bench_format == BENCH_FORMAT_DEFAULT)
                                printf("# Running '%s/%s' benchmark:\n", coll->name, bench->name);
                        fflush(stdout);
                        ret = run_bench(coll->name, bench->name, bench->fn, argc-1, argv+1);
                        goto end;
                }

                if (!strcmp(argv[1], "-h") || !strcmp(argv[1], "--help")) {
                        dump_benchmarks(coll);
                        goto end;
                }

                printf("Unknown benchmark: '%s' for collection '%s'\n", argv[1], argv[0]);
                ret = 1;
                goto end;
        }

        printf("Unknown collection: '%s'\n", argv[0]);
        ret = 1;

end:
        return ret;
}
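The collection/bench tables above are the whole registration surface, so
extending 'perf bench' is mechanical: write one main()-style function and
add one table row. A minimal sketch with a hypothetical bench_foo() (the
name, file, and message are illustrative, not part of this commit):

  /* tools/perf/bench/foo.c - hypothetical example */
  #include <stdio.h>
  #include "bench.h"

  int bench_foo(int argc, const char **argv)
  {
          /* a real benchmark would parse its own options from argc/argv */
          (void)argc;
          (void)argv;
          printf("foo: nothing measured yet\n");
          return 0;
  }

  /* builtin-bench.c - one new row, e.g. in internals_benchmarks[] */
  { "foo", "Hypothetical demo benchmark", bench_foo },

After wiring the new file into tools/perf/bench/Build and rebuilding, it
would be reachable as 'perf bench internals foo', with run_bench() setting
the task comm to "internals-foo" before calling it.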