
perf/core improvements and fixes:

User visible:
 
 . Add several futex 'perf bench' microbenchmarks (Davidlohr Bueso)
 
 . Speed up thread map generation (Don Zickus)
 
 . Fix synthesizing mmaps for threads (Don Zickus)
 
 . Fix invalid output on event group stdio report  (Namhyung Kim)
 
 . Introduce 'perf kvm --list-cmds' command line option for use by
   scripts (Ramkumar Ramachandra)
 
 Documentation:
 
 . Clarify load-latency information in the 'perf mem' docs (Andi Kleen)
 
 . Clarify x86 register naming in 'perf probe' docs (Andi Kleen)
 
 Refactorings:
 
 . hists browser refactorings to reuse code across UIs (Namhyung Kim)
 
 Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJTI3P+AAoJENZQFvNTUqpAXhwP/iQimhGd5uYXgm2qvosUQFV+
 90GBLfBZtZybV8rxQJMeGoDrZ5Kd9ijC0fPpMXkiqvEBv2YY/q9x2GB7aOTIW54r
 sGls2JREHBx0+vutsxwpKADNSJAem3l3jM21XzXoNavSL0yWh/vWye8AZ6hO8Qi5
 +R6FrstomKLPzOtKw/SUbhWHzy9HRbMU1MSRQboPzcO1IYy0YKuiRvayl0y5x7sB
 p7vVSkjMfp4mbo0jl8PfExnhpiFBQpfMq6dG4phmZUjPb1c7pxyyCj5n8DU6oM/h
 /U/QfXW6QhJx9uJMmG/UzK1/nMyH2ahYx7MnfJdglqVD0Vp1u1mLry9hFPEuOvH7
 lGS7zlFym9AbcGp9mZJkWcM4I6Xq1/Wa3gBxLvHzZ5+9H+UZAYRa1A520iE01Op+
 lo9EEzgbmYFLwuuux2HBFMN9bZU7GjBiCdjnsIZnhI47KAJYsfx/E0X6Oc05Gnkt
 4bRE0mK970IB2ctTbWL49eR6/Go8fxXi/9/G7sfB8LUW11Sw/g683TXAMsmBk8FW
 vxuompaqvl0WLOALkZxioSlnoVoRAiNrrLxp4+1HNUYhgGkn+X7x064thc7MJODq
 Nzon/Xa5Iau1tHIOx7T0T1lv+dlNzliuEMnQnoEEckQb4K2rO/9BdzxCZto4R8gc
 7B0quZoY3ejh0VlThjqx
 =jF5D
 -----END PGP SIGNATURE-----

Merge tag 'perf-core-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core

Pull perf/core improvements and fixes from Arnaldo:

User visible:

  * Add several futex 'perf bench' microbenchmarks (Davidlohr Bueso)

  * Speed up thread map generation (Don Zickus)

  * Fix synthesizing mmaps for threads (Don Zickus)

  * Fix invalid output on event group stdio report  (Namhyung Kim)

  * Introduce 'perf kvm --list-cmds' command line option for use by
    scripts (Ramkumar Ramachandra)

Documentation:

  * Clarify load-latency information in the 'perf mem' docs (Andi Kleen)

  * Clarify x86 register naming in 'perf probe' docs (Andi Kleen)

Refactorings:

  * hists browser refactorings to reuse code across UIs (Namhyung Kim)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Ingo Molnar 2014-03-18 09:23:09 +01:00
commit 0afd2d5102
27 changed files with 1101 additions and 308 deletions


@ -18,6 +18,10 @@ from it, into perf.data. Perf record options are accepted and are passed through
"perf mem -t <TYPE> report" displays the result. It invokes perf report with the
right set of options to display a memory access profile.
Note that on Intel systems the memory latency reported is the use-latency,
not the pure load (or store latency). Use latency includes any pipeline
queueing delays in addition to the memory subsystem latency.
OPTIONS
-------
<command>...::


@ -136,6 +136,8 @@ Each probe argument follows below syntax.
'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
LINE SYNTAX
-----------
Line range is described by following syntax.


@ -426,6 +426,9 @@ BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
endif
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
BUILTIN_OBJS += $(OUTPUT)bench/mem-memset.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-requeue.o
BUILTIN_OBJS += $(OUTPUT)builtin-diff.o
BUILTIN_OBJS += $(OUTPUT)builtin-evlist.o


@ -31,6 +31,9 @@ extern int bench_sched_pipe(int argc, const char **argv, const char *prefix);
extern int bench_mem_memcpy(int argc, const char **argv,
const char *prefix __maybe_unused);
extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
#define BENCH_FORMAT_DEFAULT_STR "default"
#define BENCH_FORMAT_DEFAULT 0


@ -0,0 +1,212 @@
/*
* Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
*
* futex-hash: Stress the hell out of the Linux kernel futex uaddr hashing.
*
* This program is particularly useful for measuring the kernel's futex hash
* table/function implementation. In order for it to make sense, use with as
* many threads and futexes as possible.
*/
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
#include "../util/parse-options.h"
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
static unsigned int nthreads = 0;
static unsigned int nsecs = 10;
/* amount of futexes per thread */
static unsigned int nfutexes = 1024;
static bool fshared = false, done = false, silent = false;
struct timeval start, end, runtime;
static pthread_mutex_t thread_lock;
static unsigned int threads_starting;
static struct stats throughput_stats;
static pthread_cond_t thread_parent, thread_worker;
struct worker {
int tid;
u_int32_t *futex;
pthread_t thread;
unsigned long ops;
};
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('r', "runtime", &nsecs, "Specify runtime (in seconds)"),
OPT_UINTEGER('f', "futexes", &nfutexes, "Specify amount of futexes per threads"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_BOOLEAN( 'S', "shared", &fshared, "Use shared futexes instead of private ones"),
OPT_END()
};
static const char * const bench_futex_hash_usage[] = {
"perf bench futex hash <options>",
NULL
};
static void *workerfn(void *arg)
{
int ret;
unsigned int i;
struct worker *w = (struct worker *) arg;
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
do {
for (i = 0; i < nfutexes; i++, w->ops++) {
/*
* We want the futex calls to fail in order to stress
* the hashing of uaddr and not measure other steps,
* such as internal waitqueue handling, thus enlarging
* the critical region protected by hb->lock.
*/
ret = futex_wait(&w->futex[i], 1234, NULL,
fshared ? 0 : FUTEX_PRIVATE_FLAG);
if (!silent &&
(!ret || errno != EAGAIN || errno != EWOULDBLOCK))
warn("Non-expected futex return call");
}
} while (!done);
return NULL;
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
/* inform all threads that we're done for the day */
done = true;
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
}
static void print_summary(void)
{
unsigned long avg = avg_stats(&throughput_stats);
double stddev = stddev_stats(&throughput_stats);
printf("%sAveraged %ld operations/sec (+- %.2f%%), total secs = %d\n",
!silent ? "\n" : "", avg, rel_stddev_stats(stddev, avg),
(int) runtime.tv_sec);
}
int bench_futex_hash(int argc, const char **argv,
const char *prefix __maybe_unused)
{
int ret = 0;
cpu_set_t cpu;
struct sigaction act;
unsigned int i, ncpus;
pthread_attr_t thread_attr;
struct worker *worker = NULL;
argc = parse_options(argc, argv, options, bench_futex_hash_usage, 0);
if (argc) {
usage_with_options(bench_futex_hash_usage, options);
exit(EXIT_FAILURE);
}
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads) /* default to the number of CPUs */
nthreads = ncpus;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
goto errmem;
printf("Run summary [PID %d]: %d threads, each operating on %d [%s] futexes for %d secs.\n\n",
getpid(), nthreads, nfutexes, fshared ? "shared":"private", nsecs);
init_stats(&throughput_stats);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
threads_starting = nthreads;
pthread_attr_init(&thread_attr);
gettimeofday(&start, NULL);
for (i = 0; i < nthreads; i++) {
worker[i].tid = i;
worker[i].futex = calloc(nfutexes, sizeof(*worker[i].futex));
if (!worker[i].futex)
goto errmem;
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
ret = pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu);
if (ret)
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
ret = pthread_create(&worker[i].thread, &thread_attr, workerfn,
(void *)(struct worker *) &worker[i]);
if (ret)
err(EXIT_FAILURE, "pthread_create");
}
pthread_attr_destroy(&thread_attr);
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
sleep(nsecs);
toggle_done(0, NULL, NULL);
for (i = 0; i < nthreads; i++) {
ret = pthread_join(worker[i].thread, NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
for (i = 0; i < nthreads; i++) {
unsigned long t = worker[i].ops/runtime.tv_sec;
update_stats(&throughput_stats, t);
if (!silent) {
if (nfutexes == 1)
printf("[thread %2d] futex: %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0], t);
else
printf("[thread %2d] futexes: %p ... %p [ %ld ops/sec ]\n",
worker[i].tid, &worker[i].futex[0],
&worker[i].futex[nfutexes-1], t);
}
free(worker[i].futex);
}
print_summary();
free(worker);
return ret;
errmem:
err(EXIT_FAILURE, "calloc");
}


@ -0,0 +1,211 @@
/*
* Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
*
* futex-requeue: Block a bunch of threads on futex1 and requeue them
* on futex2, N at a time.
*
* This program is particularly useful to measure the latency of nthread
* requeues without waking up any tasks -- thus mimicking a regular futex_wait.
*/
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
#include "../util/parse-options.h"
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
static u_int32_t futex1 = 0, futex2 = 0;
/*
* How many tasks to requeue at a time.
* Default to 1 in order to make the kernel work more.
*/
static unsigned int nrequeue = 1;
/*
* There can be significant variance from run to run,
* the more repeats, the more exact the overall avg and
* the better idea of the futex latency.
*/
static unsigned int repeat = 10;
static pthread_t *worker;
static bool done = 0, silent = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats requeuetime_stats, requeued_stats;
static unsigned int ncpus, threads_starting, nthreads = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('q', "nrequeue", &nrequeue, "Specify amount of threads to requeue at once"),
OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_END()
};
static const char * const bench_futex_requeue_usage[] = {
"perf bench futex requeue <options>",
NULL
};
static void print_summary(void)
{
double requeuetime_avg = avg_stats(&requeuetime_stats);
double requeuetime_stddev = stddev_stats(&requeuetime_stats);
unsigned int requeued_avg = avg_stats(&requeued_stats);
printf("Requeued %d of %d threads in %.4f ms (+-%.2f%%)\n",
requeued_avg,
nthreads,
requeuetime_avg/1e3,
rel_stddev_stats(requeuetime_stddev, requeuetime_avg));
}
static void *workerfn(void *arg __maybe_unused)
{
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
return NULL;
}
static void block_threads(pthread_t *w,
pthread_attr_t thread_attr)
{
cpu_set_t cpu;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
err(EXIT_FAILURE, "pthread_create");
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_requeue(int argc, const char **argv,
const char *prefix __maybe_unused)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
argc = parse_options(argc, argv, options, bench_futex_requeue_usage, 0);
if (argc)
goto err;
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
nthreads = ncpus;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
printf("Run summary [PID %d]: Requeuing %d threads (from %p to %p), "
"%d at a time.\n\n",
getpid(), nthreads, &futex1, &futex2, nrequeue);
init_stats(&requeued_stats);
init_stats(&requeuetime_stats);
pthread_attr_init(&thread_attr);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
for (j = 0; j < repeat && !done; j++) {
unsigned int nrequeued = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, thread_attr);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue)
/*
* Do not wakeup any tasks blocked on futex1, allowing
* us to really measure futex_wait functionality.
*/
futex_cmp_requeue(&futex1, 0, &futex2, 0, nrequeue,
FUTEX_PRIVATE_FLAG);
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
if (!silent) {
printf("[Run %d]: Requeued %d of %d threads in %.4f ms\n",
j + 1, nrequeued, nthreads, runtime.tv_usec/1e3);
}
/* everybody should be blocked on futex2, wake'em up */
nrequeued = futex_wake(&futex2, nthreads, FUTEX_PRIVATE_FLAG);
if (nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
for (i = 0; i < nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(worker);
return ret;
err:
usage_with_options(bench_futex_requeue_usage, options);
exit(EXIT_FAILURE);
}


@ -0,0 +1,201 @@
/*
* Copyright (C) 2013 Davidlohr Bueso <davidlohr@hp.com>
*
* futex-wake: Block a bunch of threads on a futex and wake'em up, N at a time.
*
* This program is particularly useful to measure the latency of nthread wakeups
* in non-error situations: all waiters are queued and all wake calls wakeup
* one or more tasks, and thus the waitqueue is never empty.
*/
#include "../perf.h"
#include "../util/util.h"
#include "../util/stat.h"
#include "../util/parse-options.h"
#include "../util/header.h"
#include "bench.h"
#include "futex.h"
#include <err.h>
#include <stdlib.h>
#include <sys/time.h>
#include <pthread.h>
/* all threads will block on the same futex */
static u_int32_t futex1 = 0;
/*
* How many wakeups to do at a time.
* Default to 1 in order to make the kernel work more.
*/
static unsigned int nwakes = 1;
/*
* There can be significant variance from run to run,
* the more repeats, the more exact the overall avg and
* the better idea of the futex latency.
*/
static unsigned int repeat = 10;
pthread_t *worker;
static bool done = 0, silent = 0;
static pthread_mutex_t thread_lock;
static pthread_cond_t thread_parent, thread_worker;
static struct stats waketime_stats, wakeup_stats;
static unsigned int ncpus, threads_starting, nthreads = 0;
static const struct option options[] = {
OPT_UINTEGER('t', "threads", &nthreads, "Specify amount of threads"),
OPT_UINTEGER('w', "nwakes", &nwakes, "Specify amount of threads to wake at once"),
OPT_UINTEGER('r', "repeat", &repeat, "Specify amount of times to repeat the run"),
OPT_BOOLEAN( 's', "silent", &silent, "Silent mode: do not display data/details"),
OPT_END()
};
static const char * const bench_futex_wake_usage[] = {
"perf bench futex wake <options>",
NULL
};
static void *workerfn(void *arg __maybe_unused)
{
pthread_mutex_lock(&thread_lock);
threads_starting--;
if (!threads_starting)
pthread_cond_signal(&thread_parent);
pthread_cond_wait(&thread_worker, &thread_lock);
pthread_mutex_unlock(&thread_lock);
futex_wait(&futex1, 0, NULL, FUTEX_PRIVATE_FLAG);
return NULL;
}
static void print_summary(void)
{
double waketime_avg = avg_stats(&waketime_stats);
double waketime_stddev = stddev_stats(&waketime_stats);
unsigned int wakeup_avg = avg_stats(&wakeup_stats);
printf("Wokeup %d of %d threads in %.4f ms (+-%.2f%%)\n",
wakeup_avg,
nthreads,
waketime_avg/1e3,
rel_stddev_stats(waketime_stddev, waketime_avg));
}
static void block_threads(pthread_t *w,
pthread_attr_t thread_attr)
{
cpu_set_t cpu;
unsigned int i;
threads_starting = nthreads;
/* create and block all threads */
for (i = 0; i < nthreads; i++) {
CPU_ZERO(&cpu);
CPU_SET(i % ncpus, &cpu);
if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
if (pthread_create(&w[i], &thread_attr, workerfn, NULL))
err(EXIT_FAILURE, "pthread_create");
}
}
static void toggle_done(int sig __maybe_unused,
siginfo_t *info __maybe_unused,
void *uc __maybe_unused)
{
done = true;
}
int bench_futex_wake(int argc, const char **argv,
const char *prefix __maybe_unused)
{
int ret = 0;
unsigned int i, j;
struct sigaction act;
pthread_attr_t thread_attr;
argc = parse_options(argc, argv, options, bench_futex_wake_usage, 0);
if (argc) {
usage_with_options(bench_futex_wake_usage, options);
exit(EXIT_FAILURE);
}
ncpus = sysconf(_SC_NPROCESSORS_ONLN);
sigfillset(&act.sa_mask);
act.sa_sigaction = toggle_done;
sigaction(SIGINT, &act, NULL);
if (!nthreads)
nthreads = ncpus;
worker = calloc(nthreads, sizeof(*worker));
if (!worker)
err(EXIT_FAILURE, "calloc");
printf("Run summary [PID %d]: blocking on %d threads (at futex %p), "
"waking up %d at a time.\n\n",
getpid(), nthreads, &futex1, nwakes);
init_stats(&wakeup_stats);
init_stats(&waketime_stats);
pthread_attr_init(&thread_attr);
pthread_mutex_init(&thread_lock, NULL);
pthread_cond_init(&thread_parent, NULL);
pthread_cond_init(&thread_worker, NULL);
for (j = 0; j < repeat && !done; j++) {
unsigned int nwoken = 0;
struct timeval start, end, runtime;
/* create, launch & block all threads */
block_threads(worker, thread_attr);
/* make sure all threads are already blocked */
pthread_mutex_lock(&thread_lock);
while (threads_starting)
pthread_cond_wait(&thread_parent, &thread_lock);
pthread_cond_broadcast(&thread_worker);
pthread_mutex_unlock(&thread_lock);
usleep(100000);
/* Ok, all threads are patiently blocked, start waking folks up */
gettimeofday(&start, NULL);
while (nwoken != nthreads)
nwoken += futex_wake(&futex1, nwakes, FUTEX_PRIVATE_FLAG);
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
update_stats(&wakeup_stats, nwoken);
update_stats(&waketime_stats, runtime.tv_usec);
if (!silent) {
printf("[Run %d]: Wokeup %d of %d threads in %.4f ms\n",
j + 1, nwoken, nthreads, runtime.tv_usec/1e3);
}
for (i = 0; i < nthreads; i++) {
ret = pthread_join(worker[i], NULL);
if (ret)
err(EXIT_FAILURE, "pthread_join");
}
}
/* cleanup & report results */
pthread_cond_destroy(&thread_parent);
pthread_cond_destroy(&thread_worker);
pthread_mutex_destroy(&thread_lock);
pthread_attr_destroy(&thread_attr);
print_summary();
free(worker);
return ret;
}

tools/perf/bench/futex.h (new file, 71 lines)

@ -0,0 +1,71 @@
/*
* Glibc independent futex library for testing kernel functionality.
* Shamelessly stolen from Darren Hart <dvhltc@us.ibm.com>
* http://git.kernel.org/cgit/linux/kernel/git/dvhart/futextest.git/
*/
#ifndef _FUTEX_H
#define _FUTEX_H
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/futex.h>
/**
* futex() - SYS_futex syscall wrapper
* @uaddr: address of first futex
* @op: futex op code
* @val: typically expected value of uaddr, but varies by op
* @timeout: typically an absolute struct timespec (except where noted
* otherwise). Overloaded by some ops
* @uaddr2: address of second futex for some ops
* @val3: varies by op
* @opflags: flags to be bitwise OR'd with op, such as FUTEX_PRIVATE_FLAG
*
* futex() is used by all the following futex op wrappers. It can also be
* used for misuse and abuse testing. Generally, the specific op wrappers
* should be used instead. It is a macro instead of a static inline function as
* some of the types are overloaded (timeout is used for nr_requeue for
* example).
*
* These argument descriptions are the defaults for all
* like-named arguments in the following wrappers except where noted below.
*/
#define futex(uaddr, op, val, timeout, uaddr2, val3, opflags) \
syscall(SYS_futex, uaddr, op | opflags, val, timeout, uaddr2, val3)
/**
* futex_wait() - block on uaddr with optional timeout
* @timeout: relative timeout
*/
static inline int
futex_wait(u_int32_t *uaddr, u_int32_t val, struct timespec *timeout, int opflags)
{
return futex(uaddr, FUTEX_WAIT, val, timeout, NULL, 0, opflags);
}
/**
* futex_wake() - wake one or more tasks blocked on uaddr
* @nr_wake: wake up to this many tasks
*/
static inline int
futex_wake(u_int32_t *uaddr, int nr_wake, int opflags)
{
return futex(uaddr, FUTEX_WAKE, nr_wake, NULL, NULL, 0, opflags);
}
/**
* futex_cmp_requeue() - requeue tasks from uaddr to uaddr2
* @nr_wake: wake up to this many tasks
* @nr_requeue: requeue up to this many tasks
*/
static inline int
futex_cmp_requeue(u_int32_t *uaddr, u_int32_t val, u_int32_t *uaddr2, int nr_wake,
int nr_requeue, int opflags)
{
return futex(uaddr, FUTEX_CMP_REQUEUE, nr_wake, nr_requeue, uaddr2,
val, opflags);
}
#endif /* _FUTEX_H */
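
For orientation, a minimal editorial sketch (not part of this patch) of how the futex_wait()/futex_wake() wrappers above pair up, mirroring the way bench/futex-wake.c blocks and releases its worker threads; the waiter()/run_once() names and the fword variable are invented for illustration:

/*
 * Sketch only: a waiter blocks with futex_wait() and the main thread
 * releases it with futex_wake(), the same pairing the benchmarks use.
 */
#include <pthread.h>
#include "futex.h"

static u_int32_t fword;	/* hypothetical futex word; 0 means "keep sleeping" */

static void *waiter(void *arg)
{
	(void)arg;
	/* sleeps in the kernel for as long as fword still reads 0 */
	futex_wait(&fword, 0, NULL, FUTEX_PRIVATE_FLAG);
	return NULL;
}

static int run_once(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, waiter, NULL))
		return -1;
	/* wake at most one task; returns 0 until the waiter has actually queued */
	while (futex_wake(&fword, 1, FUTEX_PRIVATE_FLAG) == 0)
		;
	return pthread_join(t, NULL);
}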


@ -12,6 +12,7 @@
* sched ... scheduler and IPC performance
* mem ... memory access performance
* numa ... NUMA scheduling and MM performance
* futex ... Futex performance
*/
#include "perf.h"
#include "util/util.h"
@ -54,6 +55,14 @@ static struct bench mem_benchmarks[] = {
{ NULL, NULL, NULL }
};
static struct bench futex_benchmarks[] = {
{ "hash", "Benchmark for futex hash table", bench_futex_hash },
{ "wake", "Benchmark for futex wake calls", bench_futex_wake },
{ "requeue", "Benchmark for futex requeue calls", bench_futex_requeue },
{ "all", "Test all futex benchmarks", NULL },
{ NULL, NULL, NULL }
};
struct collection {
const char *name;
const char *summary;
@ -61,11 +70,12 @@ struct collection {
};
static struct collection collections[] = {
{ "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
{ "sched", "Scheduler and IPC benchmarks", sched_benchmarks },
{ "mem", "Memory access benchmarks", mem_benchmarks },
#ifdef HAVE_LIBNUMA_SUPPORT
{ "numa", "NUMA scheduling and MM benchmarks", numa_benchmarks },
#endif
{"futex", "Futex stressing benchmarks", futex_benchmarks },
{ "all", "All benchmarks", NULL },
{ NULL, NULL, NULL }
};


@ -952,8 +952,8 @@ static int hpp__entry_global(struct perf_hpp_fmt *_fmt, struct perf_hpp *hpp,
dfmt->header_width, buf);
}
static int hpp__header(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp)
static int hpp__header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);
@ -963,7 +963,8 @@ static int hpp__header(struct perf_hpp_fmt *fmt,
}
static int hpp__width(struct perf_hpp_fmt *fmt,
struct perf_hpp *hpp __maybe_unused)
struct perf_hpp *hpp __maybe_unused,
struct perf_evsel *evsel __maybe_unused)
{
struct diff_hpp_fmt *dfmt =
container_of(fmt, struct diff_hpp_fmt, fmt);


@ -1691,17 +1691,15 @@ int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
OPT_END()
};
const char * const kvm_usage[] = {
"perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
NULL
};
const char *const kvm_subcommands[] = { "top", "record", "report", "diff",
"buildid-list", "stat", NULL };
const char *kvm_usage[] = { NULL, NULL };
perf_host = 0;
perf_guest = 1;
argc = parse_options(argc, argv, kvm_options, kvm_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
argc = parse_options_subcommand(argc, argv, kvm_options, kvm_subcommands, kvm_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (!argc)
usage_with_options(kvm_usage, kvm_options);


@ -123,7 +123,7 @@ __perf_main ()
__perfcomp_colon "$evts" "$cur"
# List subcommands for 'perf kvm'
elif [[ $prev == "kvm" ]]; then
subcmds="top record report diff buildid-list stat"
subcmds=$($cmd $prev --list-cmds)
__perfcomp_colon "$subcmds" "$cur"
# List long option names
elif [[ $cur == --* ]]; then


@ -12,6 +12,9 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 336
#endif
#ifndef __NR_futex
# define __NR_futex 240
#endif
#endif
#if defined(__x86_64__)
@ -23,6 +26,9 @@
#ifndef __NR_perf_event_open
# define __NR_perf_event_open 298
#endif
#ifndef __NR_futex
# define __NR_futex 202
#endif
#endif
#ifdef __powerpc__


@ -128,7 +128,7 @@ int test__dwarf_unwind(void)
if (verbose > 1)
machine__fprintf(machine, stderr);
thread = machine__find_thread(machine, getpid());
thread = machine__find_thread(machine, getpid(), getpid());
if (!thread) {
pr_err("Could not get thread\n");
goto out;


@ -587,95 +587,52 @@ struct hpp_arg {
bool current_entry;
};
static int __hpp__color_callchain(struct hpp_arg *arg)
static int __hpp__overhead_callback(struct perf_hpp *hpp, bool front)
{
if (!symbol_conf.use_callchain)
return 0;
slsmg_printf("%c ", arg->folded_sign);
return 2;
}
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *),
int (*callchain_cb)(struct hpp_arg *))
{
int ret = 0;
double percent = 0.0;
struct hists *hists = he->hists;
struct hpp_arg *arg = hpp->ptr;
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
if (arg->current_entry && arg->b->navkeypressed)
ui_browser__set_color(arg->b, HE_COLORSET_SELECTED);
else
ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
if (front) {
if (!symbol_conf.use_callchain)
return 0;
slsmg_printf("%c ", arg->folded_sign);
return 2;
}
return 0;
}
static int __hpp__color_callback(struct perf_hpp *hpp, bool front __maybe_unused)
{
struct hpp_arg *arg = hpp->ptr;
if (!arg->current_entry || !arg->b->navkeypressed)
ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
return 0;
}
static int __hpp__slsmg_color_printf(struct perf_hpp *hpp, const char *fmt, ...)
{
struct hpp_arg *arg = hpp->ptr;
int ret;
va_list args;
double percent;
va_start(args, fmt);
percent = va_arg(args, double);
va_end(args);
ui_browser__set_percent_color(arg->b, percent, arg->current_entry);
if (callchain_cb)
ret += callchain_cb(arg);
ret += scnprintf(hpp->buf, hpp->size, "%6.2f%%", percent);
ret = scnprintf(hpp->buf, hpp->size, fmt, percent);
slsmg_printf("%s", hpp->buf);
if (symbol_conf.event_group) {
int prev_idx, idx_delta;
struct perf_evsel *evsel = hists_to_evsel(hists);
struct hist_entry *pair;
int nr_members = evsel->nr_members;
if (nr_members <= 1)
goto out;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = pair->hists->stats.total_period;
if (!total)
continue;
evsel = hists_to_evsel(pair->hists);
idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
ui_browser__set_percent_color(arg->b, 0.0,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", 0.0);
slsmg_printf("%s", hpp->buf);
}
percent = 100.0 * period / total;
ui_browser__set_percent_color(arg->b, percent,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", percent);
slsmg_printf("%s", hpp->buf);
prev_idx = perf_evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
ui_browser__set_percent_color(arg->b, 0.0,
arg->current_entry);
ret += scnprintf(hpp->buf, hpp->size,
" %6.2f%%", 0.0);
slsmg_printf("%s", hpp->buf);
}
}
out:
if (!arg->current_entry || !arg->b->navkeypressed)
ui_browser__set_color(arg->b, HE_COLORSET_NORMAL);
advance_hpp(hpp, ret);
return ret;
}
@ -690,14 +647,15 @@ hist_browser__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,\
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, __hpp_get_##_field, _cb); \
return __hpp__fmt(hpp, he, __hpp_get_##_field, _cb, " %6.2f%%", \
__hpp__slsmg_color_printf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__color_callchain)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, NULL)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, NULL)
__HPP_COLOR_PERCENT_FN(overhead, period, __hpp__overhead_callback)
__HPP_COLOR_PERCENT_FN(overhead_sys, period_sys, __hpp__color_callback)
__HPP_COLOR_PERCENT_FN(overhead_us, period_us, __hpp__color_callback)
__HPP_COLOR_PERCENT_FN(overhead_guest_sys, period_guest_sys, __hpp__color_callback)
__HPP_COLOR_PERCENT_FN(overhead_guest_us, period_guest_us, __hpp__color_callback)
#undef __HPP_COLOR_PERCENT_FN


@ -8,16 +8,24 @@
#define MAX_COLUMNS 32
static int __percent_color_snprintf(char *buf, size_t size, double percent)
static int __percent_color_snprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
int ret = 0;
va_list args;
double percent;
const char *markup;
char *buf = hpp->buf;
size_t size = hpp->size;
va_start(args, fmt);
percent = va_arg(args, double);
va_end(args);
markup = perf_gtk__get_percent_color(percent);
if (markup)
ret += scnprintf(buf, size, markup);
ret += scnprintf(buf + ret, size - ret, " %6.2f%%", percent);
ret += scnprintf(buf + ret, size - ret, fmt, percent);
if (markup)
ret += scnprintf(buf + ret, size - ret, "</span>");
@ -25,66 +33,6 @@ static int __percent_color_snprintf(char *buf, size_t size, double percent)
return ret;
}
static int __hpp__color_fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *))
{
int ret;
double percent = 0.0;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
if (hists->stats.total_period)
percent = 100.0 * get_field(he) / hists->stats.total_period;
ret = __percent_color_snprintf(hpp->buf, hpp->size, percent);
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
struct hist_entry *pair;
int nr_members = evsel->nr_members;
prev_idx = perf_evsel__group_idx(evsel);
list_for_each_entry(pair, &he->pairs.head, pairs.node) {
u64 period = get_field(pair);
u64 total = pair->hists->stats.total_period;
evsel = hists_to_evsel(pair->hists);
idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members in the middle which
* have no sample
*/
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
0.0);
}
percent = 100.0 * period / total;
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
percent);
prev_idx = perf_evsel__group_idx(evsel);
}
idx_delta = nr_members - prev_idx - 1;
while (idx_delta--) {
/*
* zero-fill group members at last which have no sample
*/
ret += __percent_color_snprintf(hpp->buf + ret,
hpp->size - ret,
0.0);
}
}
return ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
@ -95,7 +43,8 @@ static int perf_gtk__hpp_color_##_type(struct perf_hpp_fmt *fmt __maybe_unused,
struct perf_hpp *hpp, \
struct hist_entry *he) \
{ \
return __hpp__color_fmt(hpp, he, he_get_##_field); \
return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
__percent_color_snprintf, true); \
}
__HPP_COLOR_PERCENT_FN(overhead, period)
@ -216,7 +165,6 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
.ptr = hists_to_evsel(hists),
};
nr_cols = 0;
@ -243,7 +191,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
col_idx = 0;
perf_hpp__for_each_format(fmt) {
fmt->header(fmt, &hpp);
fmt->header(fmt, &hpp, hists_to_evsel(hists));
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, ltrim(s),


@ -8,16 +8,27 @@
/* hist period print (hpp) functions */
typedef int (*hpp_snprint_fn)(char *buf, size_t size, const char *fmt, ...);
#define hpp__call_print_fn(hpp, fn, fmt, ...) \
({ \
int __ret = fn(hpp, fmt, ##__VA_ARGS__); \
advance_hpp(hpp, __ret); \
__ret; \
})
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
u64 (*get_field)(struct hist_entry *),
const char *fmt, hpp_snprint_fn print_fn,
bool fmt_percent)
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
hpp_field_fn get_field, hpp_callback_fn callback,
const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent)
{
int ret;
int ret = 0;
struct hists *hists = he->hists;
struct perf_evsel *evsel = hists_to_evsel(hists);
char *buf = hpp->buf;
size_t size = hpp->size;
if (callback) {
ret = callback(hpp, true);
advance_hpp(hpp, ret);
}
if (fmt_percent) {
double percent = 0.0;
@ -26,9 +37,9 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
percent = 100.0 * get_field(he) /
hists->stats.total_period;
ret = print_fn(hpp->buf, hpp->size, fmt, percent);
ret += hpp__call_print_fn(hpp, print_fn, fmt, percent);
} else
ret = print_fn(hpp->buf, hpp->size, fmt, get_field(he));
ret += hpp__call_print_fn(hpp, print_fn, fmt, get_field(he));
if (perf_evsel__is_group_event(evsel)) {
int prev_idx, idx_delta;
@ -52,16 +63,22 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
* zero-fill group members in the middle which
* have no sample
*/
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 0);
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, 0ULL);
}
}
if (fmt_percent)
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 100.0 * period / total);
else
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, period);
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn, fmt,
100.0 * period / total);
} else {
ret += hpp__call_print_fn(hpp, print_fn, fmt,
period);
}
prev_idx = perf_evsel__group_idx(evsel);
}
@ -72,41 +89,87 @@ static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
/*
* zero-fill group members at last which have no sample
*/
ret += print_fn(hpp->buf + ret, hpp->size - ret,
fmt, 0);
if (fmt_percent) {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, 0.0);
} else {
ret += hpp__call_print_fn(hpp, print_fn,
fmt, 0ULL);
}
}
}
if (callback) {
int __ret = callback(hpp, false);
advance_hpp(hpp, __ret);
ret += __ret;
}
/*
* Restore original buf and size as it's where caller expects
* the result will be saved.
*/
hpp->buf = buf;
hpp->size = size;
return ret;
}
#define __HPP_HEADER_FN(_type, _str, _min_width, _unit_width) \
static int hpp__header_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp) \
struct perf_hpp *hpp, \
struct perf_evsel *evsel) \
{ \
int len = _min_width; \
\
if (symbol_conf.event_group) { \
struct perf_evsel *evsel = hpp->ptr; \
\
if (symbol_conf.event_group) \
len = max(len, evsel->nr_members * _unit_width); \
} \
\
return scnprintf(hpp->buf, hpp->size, "%*s", len, _str); \
}
#define __HPP_WIDTH_FN(_type, _min_width, _unit_width) \
static int hpp__width_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp __maybe_unused) \
struct perf_hpp *hpp __maybe_unused, \
struct perf_evsel *evsel) \
{ \
int len = _min_width; \
\
if (symbol_conf.event_group) { \
struct perf_evsel *evsel = hpp->ptr; \
\
if (symbol_conf.event_group) \
len = max(len, evsel->nr_members * _unit_width); \
} \
\
return len; \
}
static int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
va_list args;
ssize_t ssize = hpp->size;
double percent;
int ret;
va_start(args, fmt);
percent = va_arg(args, double);
ret = value_color_snprintf(hpp->buf, hpp->size, fmt, percent);
va_end(args);
return (ret >= ssize) ? (ssize - 1) : ret;
}
static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
va_list args;
ssize_t ssize = hpp->size;
int ret;
va_start(args, fmt);
ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
va_end(args);
return (ret >= ssize) ? (ssize - 1) : ret;
}
#define __HPP_COLOR_PERCENT_FN(_type, _field) \
static u64 he_get_##_field(struct hist_entry *he) \
{ \
@ -116,8 +179,8 @@ static u64 he_get_##_field(struct hist_entry *he) \
static int hpp__color_##_type(struct perf_hpp_fmt *fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
return __hpp__fmt(hpp, he, he_get_##_field, " %6.2f%%", \
percent_color_snprintf, true); \
return __hpp__fmt(hpp, he, he_get_##_field, NULL, " %6.2f%%", \
hpp_color_scnprintf, true); \
}
#define __HPP_ENTRY_PERCENT_FN(_type, _field) \
@ -125,8 +188,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %.2f" : " %6.2f%%"; \
return __hpp__fmt(hpp, he, he_get_##_field, fmt, \
scnprintf, true); \
return __hpp__fmt(hpp, he, he_get_##_field, NULL, fmt, \
hpp_entry_scnprintf, true); \
}
#define __HPP_ENTRY_RAW_FN(_type, _field) \
@ -139,7 +202,8 @@ static int hpp__entry_##_type(struct perf_hpp_fmt *_fmt __maybe_unused, \
struct perf_hpp *hpp, struct hist_entry *he) \
{ \
const char *fmt = symbol_conf.field_sep ? " %"PRIu64 : " %11"PRIu64; \
return __hpp__fmt(hpp, he, he_get_raw_##_field, fmt, scnprintf, false); \
return __hpp__fmt(hpp, he, he_get_raw_##_field, NULL, fmt, \
hpp_entry_scnprintf, false); \
}
#define HPP_PERCENT_FNS(_type, _str, _field, _min_width, _unit_width) \
@ -263,15 +327,13 @@ unsigned int hists__sort_list_width(struct hists *hists)
struct perf_hpp_fmt *fmt;
struct sort_entry *se;
int i = 0, ret = 0;
struct perf_hpp dummy_hpp = {
.ptr = hists_to_evsel(hists),
};
struct perf_hpp dummy_hpp;
perf_hpp__for_each_format(fmt) {
if (i)
ret += 2;
ret += fmt->width(fmt, &dummy_hpp);
ret += fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
}
list_for_each_entry(se, &hist_entry__sort_list, list)


@ -306,12 +306,6 @@ static size_t hist_entry__callchain_fprintf(struct hist_entry *he,
return hist_entry_callchain__fprintf(he, total_period, left_margin, fp);
}
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
hpp->buf += inc;
hpp->size -= inc;
}
static int hist_entry__period_snprintf(struct perf_hpp *hpp,
struct hist_entry *he)
{
@ -385,7 +379,6 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
struct perf_hpp dummy_hpp = {
.buf = bf,
.size = sizeof(bf),
.ptr = hists_to_evsel(hists),
};
bool first = true;
size_t linesz;
@ -404,7 +397,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
else
first = false;
fmt->header(fmt, &dummy_hpp);
fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
fprintf(fp, "%s", bf);
}
@ -449,7 +442,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
else
first = false;
width = fmt->width(fmt, &dummy_hpp);
width = fmt->width(fmt, &dummy_hpp, hists_to_evsel(hists));
for (i = 0; i < width; i++)
fprintf(fp, ".");
}


@ -94,14 +94,10 @@ static pid_t perf_event__get_comm_tgid(pid_t pid, char *comm, size_t len)
static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
union perf_event *event, pid_t pid,
int full,
perf_event__handler_t process,
struct machine *machine)
{
char filename[PATH_MAX];
size_t size;
DIR *tasks;
struct dirent dirent, *next;
pid_t tgid;
memset(&event->comm, 0, sizeof(event->comm));
@ -124,57 +120,37 @@ static pid_t perf_event__synthesize_comm(struct perf_tool *tool,
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
machine->id_hdr_size);
if (!full) {
event->comm.tid = pid;
event->comm.tid = pid;
if (process(tool, event, &synth_sample, machine) != 0)
return -1;
if (process(tool, event, &synth_sample, machine) != 0)
return -1;
goto out;
}
if (machine__is_default_guest(machine))
return 0;
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
tasks = opendir(filename);
if (tasks == NULL) {
pr_debug("couldn't open %s\n", filename);
return 0;
}
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
pid = strtol(dirent.d_name, &end, 10);
if (*end)
continue;
/* already have tgid; just want to update the comm */
(void) perf_event__get_comm_tgid(pid, event->comm.comm,
sizeof(event->comm.comm));
size = strlen(event->comm.comm) + 1;
size = PERF_ALIGN(size, sizeof(u64));
memset(event->comm.comm + size, 0, machine->id_hdr_size);
event->comm.header.size = (sizeof(event->comm) -
(sizeof(event->comm.comm) - size) +
machine->id_hdr_size);
event->comm.tid = pid;
if (process(tool, event, &synth_sample, machine) != 0) {
tgid = -1;
break;
}
}
closedir(tasks);
out:
return tgid;
}
static int perf_event__synthesize_fork(struct perf_tool *tool,
union perf_event *event, pid_t pid,
pid_t tgid, perf_event__handler_t process,
struct machine *machine)
{
memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);
/* this is really a clone event but we use fork to synthesize it */
event->fork.ppid = tgid;
event->fork.ptid = tgid;
event->fork.pid = tgid;
event->fork.tid = pid;
event->fork.header.type = PERF_RECORD_FORK;
event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);
if (process(tool, event, &synth_sample, machine) != 0)
return -1;
return 0;
}
int perf_event__synthesize_mmap_events(struct perf_tool *tool,
union perf_event *event,
pid_t pid, pid_t tgid,
@ -324,17 +300,71 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
static int __event__synthesize_thread(union perf_event *comm_event,
union perf_event *mmap_event,
union perf_event *fork_event,
pid_t pid, int full,
perf_event__handler_t process,
struct perf_tool *tool,
struct machine *machine, bool mmap_data)
{
pid_t tgid = perf_event__synthesize_comm(tool, comm_event, pid, full,
char filename[PATH_MAX];
DIR *tasks;
struct dirent dirent, *next;
pid_t tgid;
/* special case: only send one comm event using passed in pid */
if (!full) {
tgid = perf_event__synthesize_comm(tool, comm_event, pid,
process, machine);
if (tgid == -1)
return -1;
return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
}
if (machine__is_default_guest(machine))
return 0;
snprintf(filename, sizeof(filename), "%s/proc/%d/task",
machine->root_dir, pid);
tasks = opendir(filename);
if (tasks == NULL) {
pr_debug("couldn't open %s\n", filename);
return 0;
}
while (!readdir_r(tasks, &dirent, &next) && next) {
char *end;
int rc = 0;
pid_t _pid;
_pid = strtol(dirent.d_name, &end, 10);
if (*end)
continue;
tgid = perf_event__synthesize_comm(tool, comm_event, _pid,
process, machine);
if (tgid == -1)
return -1;
if (_pid == pid) {
/* process the parent's maps too */
rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
} else {
/* only fork the tid's map, to save time */
rc = perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
process, machine);
if (tgid == -1)
return -1;
return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
process, machine, mmap_data);
}
if (rc)
return rc;
}
closedir(tasks);
return 0;
}
int perf_event__synthesize_thread_map(struct perf_tool *tool,
@ -343,7 +373,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
struct machine *machine,
bool mmap_data)
{
union perf_event *comm_event, *mmap_event;
union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1, thread, j;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
@ -354,9 +384,14 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
if (mmap_event == NULL)
goto out_free_comm;
fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
if (fork_event == NULL)
goto out_free_mmap;
err = 0;
for (thread = 0; thread < threads->nr; ++thread) {
if (__event__synthesize_thread(comm_event, mmap_event,
fork_event,
threads->map[thread], 0,
process, tool, machine,
mmap_data)) {
@ -382,6 +417,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
/* if not, generate events for it */
if (need_leader &&
__event__synthesize_thread(comm_event, mmap_event,
fork_event,
comm_event->comm.pid, 0,
process, tool, machine,
mmap_data)) {
@ -390,6 +426,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
}
}
}
free(fork_event);
out_free_mmap:
free(mmap_event);
out_free_comm:
free(comm_event);
@ -404,7 +442,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
DIR *proc;
char proc_path[PATH_MAX];
struct dirent dirent, *next;
union perf_event *comm_event, *mmap_event;
union perf_event *comm_event, *mmap_event, *fork_event;
int err = -1;
comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
@ -415,6 +453,10 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
if (mmap_event == NULL)
goto out_free_comm;
fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
if (fork_event == NULL)
goto out_free_mmap;
if (machine__is_default_guest(machine))
return 0;
@ -422,7 +464,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
proc = opendir(proc_path);
if (proc == NULL)
goto out_free_mmap;
goto out_free_fork;
while (!readdir_r(proc, &dirent, &next) && next) {
char *end;
@ -434,12 +476,14 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
* We may race with exiting thread, so don't stop just because
* one thread couldn't be synthesized.
*/
__event__synthesize_thread(comm_event, mmap_event, pid, 1,
process, tool, machine, mmap_data);
__event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
1, process, tool, machine, mmap_data);
}
err = 0;
closedir(proc);
out_free_fork:
free(fork_event);
out_free_mmap:
free(mmap_event);
out_free_comm:


@ -132,8 +132,10 @@ struct perf_hpp {
};
struct perf_hpp_fmt {
int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp);
int (*header)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel);
int (*width)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct perf_evsel *evsel);
int (*color)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
struct hist_entry *he);
int (*entry)(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
@ -166,6 +168,20 @@ void perf_hpp__init(void);
void perf_hpp__column_register(struct perf_hpp_fmt *format);
void perf_hpp__column_enable(unsigned col);
typedef u64 (*hpp_field_fn)(struct hist_entry *he);
typedef int (*hpp_callback_fn)(struct perf_hpp *hpp, bool front);
typedef int (*hpp_snprint_fn)(struct perf_hpp *hpp, const char *fmt, ...);
int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
hpp_field_fn get_field, hpp_callback_fn callback,
const char *fmt, hpp_snprint_fn print_fn, bool fmt_percent);
static inline void advance_hpp(struct perf_hpp *hpp, int inc)
{
hpp->buf += inc;
hpp->size -= inc;
}
static inline size_t perf_hpp__use_color(void)
{
return !symbol_conf.field_sep;
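
As a rough illustration of why advance_hpp() now lives in this header: each hpp__call_print_fn() invocation writes at hpp->buf and then moves the cursor forward so group members land one after another in the same buffer. A hypothetical sketch, assuming the declarations above; the function name and percentages are invented:

/*
 * Sketch only: the cursor-advance pattern that __hpp__fmt() builds on.
 */
static int emit_two_percentages(struct perf_hpp *hpp)
{
	int printed = 0, n;

	n = scnprintf(hpp->buf, hpp->size, " %6.2f%%", 42.0);	/* first member */
	advance_hpp(hpp, n);
	printed += n;

	n = scnprintf(hpp->buf, hpp->size, " %6.2f%%", 58.0);	/* next member */
	advance_hpp(hpp, n);
	printed += n;

	/* __hpp__fmt() restores hpp->buf and hpp->size before returning */
	return printed;
}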


@ -327,9 +327,10 @@ struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
return __machine__findnew_thread(machine, pid, tid, true);
}
struct thread *machine__find_thread(struct machine *machine, pid_t tid)
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid)
{
return __machine__findnew_thread(machine, 0, tid, false);
return __machine__findnew_thread(machine, pid, tid, false);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
@ -1114,7 +1115,9 @@ static void machine__remove_thread(struct machine *machine, struct thread *th)
int machine__process_fork_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample)
{
struct thread *thread = machine__find_thread(machine, event->fork.tid);
struct thread *thread = machine__find_thread(machine,
event->fork.pid,
event->fork.tid);
struct thread *parent = machine__findnew_thread(machine,
event->fork.ppid,
event->fork.ptid);
@ -1140,7 +1143,9 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
int machine__process_exit_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample __maybe_unused)
{
struct thread *thread = machine__find_thread(machine, event->fork.tid);
struct thread *thread = machine__find_thread(machine,
event->fork.pid,
event->fork.tid);
if (dump_trace)
perf_event__fprintf_task(event, stdout);
@ -1184,39 +1189,22 @@ static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
return 0;
}
static const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
#define NCPUMODES (sizeof(cpumodes)/sizeof(u8))
static void ip__resolve_ams(struct machine *machine, struct thread *thread,
struct addr_map_symbol *ams,
u64 ip)
{
struct addr_location al;
size_t i;
u8 m;
memset(&al, 0, sizeof(al));
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
thread__find_cpumode_addr_location(thread, machine, MAP__FUNCTION, ip, &al);
for (i = 0; i < NCPUMODES; i++) {
m = cpumodes[i];
/*
* We cannot use the header.misc hint to determine whether a
* branch stack address is user, kernel, guest, hypervisor.
* Branches may straddle the kernel/user/hypervisor boundaries.
* Thus, we have to try consecutively until we find a match
* or else, the symbol is unknown
*/
thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
ip, &al);
if (al.map)
goto found;
}
found:
ams->addr = ip;
ams->al_addr = al.addr;
ams->sym = al.sym;


@ -41,7 +41,8 @@ struct map *machine__kernel_map(struct machine *machine, enum map_type type)
return machine->vmlinux_maps[type];
}
struct thread *machine__find_thread(struct machine *machine, pid_t tid);
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
pid_t tid);
int machine__process_comm_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);


@ -407,7 +407,9 @@ int parse_options_step(struct parse_opt_ctx_t *ctx,
if (internal_help && !strcmp(arg + 2, "help"))
return usage_with_options_internal(usagestr, options, 0);
if (!strcmp(arg + 2, "list-opts"))
return PARSE_OPT_LIST;
return PARSE_OPT_LIST_OPTS;
if (!strcmp(arg + 2, "list-cmds"))
return PARSE_OPT_LIST_SUBCMDS;
switch (parse_long_opt(ctx, arg + 2, options)) {
case -1:
return parse_options_usage(usagestr, options, arg + 2, 0);
@ -433,25 +435,45 @@ int parse_options_end(struct parse_opt_ctx_t *ctx)
return ctx->cpidx + ctx->argc;
}
int parse_options(int argc, const char **argv, const struct option *options,
const char * const usagestr[], int flags)
int parse_options_subcommand(int argc, const char **argv, const struct option *options,
const char *const subcommands[], const char *usagestr[], int flags)
{
struct parse_opt_ctx_t ctx;
perf_header__set_cmdline(argc, argv);
/* build usage string if it's not provided */
if (subcommands && !usagestr[0]) {
struct strbuf buf = STRBUF_INIT;
strbuf_addf(&buf, "perf %s [<options>] {", argv[0]);
for (int i = 0; subcommands[i]; i++) {
if (i)
strbuf_addstr(&buf, "|");
strbuf_addstr(&buf, subcommands[i]);
}
strbuf_addstr(&buf, "}");
usagestr[0] = strdup(buf.buf);
strbuf_release(&buf);
}
parse_options_start(&ctx, argc, argv, flags);
switch (parse_options_step(&ctx, options, usagestr)) {
case PARSE_OPT_HELP:
exit(129);
case PARSE_OPT_DONE:
break;
case PARSE_OPT_LIST:
case PARSE_OPT_LIST_OPTS:
while (options->type != OPTION_END) {
printf("--%s ", options->long_name);
options++;
}
exit(130);
case PARSE_OPT_LIST_SUBCMDS:
for (int i = 0; subcommands[i]; i++)
printf("%s ", subcommands[i]);
exit(130);
default: /* PARSE_OPT_UNKNOWN */
if (ctx.argv[0][1] == '-') {
error("unknown option `%s'", ctx.argv[0] + 2);
@ -464,6 +486,13 @@ int parse_options(int argc, const char **argv, const struct option *options,
return parse_options_end(&ctx);
}
int parse_options(int argc, const char **argv, const struct option *options,
const char * const usagestr[], int flags)
{
return parse_options_subcommand(argc, argv, options, NULL,
(const char **) usagestr, flags);
}
#define USAGE_OPTS_WIDTH 24
#define USAGE_GAP 2

View File

@ -140,6 +140,11 @@ extern int parse_options(int argc, const char **argv,
const struct option *options,
const char * const usagestr[], int flags);
extern int parse_options_subcommand(int argc, const char **argv,
const struct option *options,
const char *const subcommands[],
const char *usagestr[], int flags);
extern NORETURN void usage_with_options(const char * const *usagestr,
const struct option *options);
@ -148,7 +153,8 @@ extern NORETURN void usage_with_options(const char * const *usagestr,
enum {
PARSE_OPT_HELP = -1,
PARSE_OPT_DONE,
PARSE_OPT_LIST,
PARSE_OPT_LIST_OPTS,
PARSE_OPT_LIST_SUBCMDS,
PARSE_OPT_UNKNOWN,
};
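
For context, an editorial sketch of a hypothetical builtin adopting the new parse_options_subcommand() entry point, wired up the same way as the builtin-kvm.c change above; the 'foo' command, its subcommands and options are invented for illustration:

/*
 * Sketch only: with usage[0] left NULL the helper builds
 * "perf foo [<options>] {bar|baz}" automatically, and --list-cmds
 * prints the subcommand names for completion scripts.
 */
static const char * const foo_subcommands[] = { "bar", "baz", NULL };
static const char *foo_usage[] = { NULL, NULL };

int cmd_foo(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const struct option foo_options[] = { OPT_END() };

	argc = parse_options_subcommand(argc, argv, foo_options, foo_subcommands,
					foo_usage, PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(foo_usage, foo_options);

	/* dispatch on argv[0], e.g. strcmp(argv[0], "bar") == 0 */
	return 0;
}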


@ -794,7 +794,7 @@ static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
if (!dump_trace)
return;
printf("(IP, %d): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
event->header.misc, sample->pid, sample->tid, sample->ip,
sample->period, sample->addr);


@ -142,3 +142,24 @@ int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
return 0;
}
void thread__find_cpumode_addr_location(struct thread *thread,
struct machine *machine,
enum map_type type, u64 addr,
struct addr_location *al)
{
size_t i;
const u8 cpumodes[] = {
PERF_RECORD_MISC_USER,
PERF_RECORD_MISC_KERNEL,
PERF_RECORD_MISC_GUEST_USER,
PERF_RECORD_MISC_GUEST_KERNEL
};
for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
thread__find_addr_location(thread, machine, cpumodes[i], type,
addr, al);
if (al->map)
break;
}
}


@ -58,6 +58,11 @@ void thread__find_addr_location(struct thread *thread, struct machine *machine,
u8 cpumode, enum map_type type, u64 addr,
struct addr_location *al);
void thread__find_cpumode_addr_location(struct thread *thread,
struct machine *machine,
enum map_type type, u64 addr,
struct addr_location *al);
static inline void *thread__priv(struct thread *thread)
{
return thread->priv;