libperf: Adopt perf_mmap__read_init() from tools/perf
Move perf_mmap__read_init() from tools/perf to libperf and export it
in the perf/mmap.h header. Also add pr_debug2()/pr_debug3() macro
support, because the moved code uses them.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-11-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
parent 7728fa0cfa
commit 7c4d41824f
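The call-site conversion applied throughout the diff below is mechanical: perf_mmap__read_init() now lives in libperf and takes the embedded struct perf_mmap, so callers pass &md->core instead of the tools/perf struct mmap wrapper. A minimal sketch of one converted drain loop, assuming tools/perf's internal util/evlist.h and util/mmap.h and an evlist that has already been mmap'ed (drain_mmaps is a hypothetical name; the event-handling body is elided):

	#include "util/evlist.h"	/* struct evlist (tools/perf internal) */
	#include "util/mmap.h"		/* struct mmap, perf_mmap__read_event(), perf_mmap__read_done() */
	#include <perf/mmap.h>		/* perf_mmap__read_init(), perf_mmap__consume() (libperf) */

	static void drain_mmaps(struct evlist *evlist)
	{
		union perf_event *event;
		int i;

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *md = &evlist->mmap[i];

			/* libperf entry point: operates on the embedded perf_mmap */
			if (perf_mmap__read_init(&md->core) < 0)
				continue;

			while ((event = perf_mmap__read_event(md)) != NULL) {
				/* ... process the event ... */
				perf_mmap__consume(&md->core);	/* already a libperf call */
			}
			perf_mmap__read_done(md);	/* still a tools/perf call at this point */
		}
	}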
@@ -118,7 +118,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -760,7 +760,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,

 	*mmap_time = ULLONG_MAX;
 	md = &evlist->mmap[idx];
-	err = perf_mmap__read_init(md);
+	err = perf_mmap__read_init(&md->core);
 	if (err < 0)
 		return (err == -EAGAIN) ? 0 : -1;

@@ -870,7 +870,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
 	union perf_event *event;

 	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		return;

 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -3801,7 +3801,7 @@ again:
 		struct mmap *md;

 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -12,6 +12,8 @@ enum libperf_print_level {
 	LIBPERF_WARN,
 	LIBPERF_INFO,
 	LIBPERF_DEBUG,
+	LIBPERF_DEBUG2,
+	LIBPERF_DEBUG3,
 };

 typedef int (*libperf_print_fn_t)(enum libperf_print_level level,
@@ -7,5 +7,6 @@
 struct perf_mmap;

 LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
+LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);

 #endif /* __LIBPERF_MMAP_H */
@@ -14,5 +14,7 @@ do { \
 #define pr_warning(fmt, ...) __pr(LIBPERF_WARN, fmt, ##__VA_ARGS__)
 #define pr_info(fmt, ...)    __pr(LIBPERF_INFO, fmt, ##__VA_ARGS__)
 #define pr_debug(fmt, ...)   __pr(LIBPERF_DEBUG, fmt, ##__VA_ARGS__)
+#define pr_debug2(fmt, ...)  __pr(LIBPERF_DEBUG2, fmt, ##__VA_ARGS__)
+#define pr_debug3(fmt, ...)  __pr(LIBPERF_DEBUG3, fmt, ##__VA_ARGS__)

 #endif /* __LIBPERF_INTERNAL_H */
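The new pr_debug2()/pr_debug3() macros only produce output once a print callback has been registered with libperf. A minimal sketch of wiring one up, assuming libperf_init() and the libperf_print_fn_t typedef from perf/core.h (shown in the hunk above); the callback name is arbitrary:

	#include <stdarg.h>
	#include <stdio.h>
	#include <perf/core.h>

	static int libperf_print(enum libperf_print_level level,
				 const char *fmt, va_list ap)
	{
		/* LIBPERF_DEBUG2/LIBPERF_DEBUG3 arrive here like any other level */
		return vfprintf(stderr, fmt, ap);
	}

	int main(void)
	{
		libperf_init(libperf_print);	/* pr_debug2()/pr_debug3() now route here */
		return 0;
	}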
@@ -41,6 +41,7 @@ LIBPERF_0.0.1 {
 		perf_evlist__set_maps;
 		perf_evlist__poll;
 		perf_mmap__consume;
+		perf_mmap__read_init;
 	local:
 		*;
 };
@@ -1,11 +1,15 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <sys/mman.h>
+#include <inttypes.h>
+#include <asm/bug.h>
+#include <errno.h>
+#include <linux/ring_buffer.h>
 #include <linux/perf_event.h>
 #include <perf/mmap.h>
 #include <internal/mmap.h>
 #include <internal/lib.h>
 #include <linux/kernel.h>
 #include "internal.h"

 void perf_mmap__init(struct perf_mmap *map, bool overwrite,
 		     libperf_unmap_cb_t unmap_cb)
@@ -91,3 +95,83 @@ void perf_mmap__consume(struct perf_mmap *map)
 	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
 		perf_mmap__put(map);
 }
+
+static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
+{
+	struct perf_event_header *pheader;
+	u64 evt_head = *start;
+	int size = mask + 1;
+
+	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
+	pheader = (struct perf_event_header *)(buf + (*start & mask));
+	while (true) {
+		if (evt_head - *start >= (unsigned int)size) {
+			pr_debug("Finished reading overwrite ring buffer: rewind\n");
+			if (evt_head - *start > (unsigned int)size)
+				evt_head -= pheader->size;
+			*end = evt_head;
+			return 0;
+		}
+
+		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
+
+		if (pheader->size == 0) {
+			pr_debug("Finished reading overwrite ring buffer: get start\n");
+			*end = evt_head;
+			return 0;
+		}
+
+		evt_head += pheader->size;
+		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
+	}
+	WARN_ONCE(1, "Shouldn't get here\n");
+	return -1;
+}
+
+/*
+ * Report the start and end of the available data in ringbuffer
+ */
+static int __perf_mmap__read_init(struct perf_mmap *md)
+{
+	u64 head = perf_mmap__read_head(md);
+	u64 old = md->prev;
+	unsigned char *data = md->base + page_size;
+	unsigned long size;
+
+	md->start = md->overwrite ? head : old;
+	md->end = md->overwrite ? old : head;
+
+	if ((md->end - md->start) < md->flush)
+		return -EAGAIN;
+
+	size = md->end - md->start;
+	if (size > (unsigned long)(md->mask) + 1) {
+		if (!md->overwrite) {
+			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
+
+			md->prev = head;
+			perf_mmap__consume(md);
+			return -EAGAIN;
+		}
+
+		/*
+		 * Backward ring buffer is full. We still have a chance to read
+		 * most of data from it.
+		 */
+		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int perf_mmap__read_init(struct perf_mmap *map)
+{
+	/*
+	 * Check if event was unmapped due to a POLLHUP/POLLERR.
+	 */
+	if (!refcount_read(&map->refcnt))
+		return -ENOENT;
+
+	return __perf_mmap__read_init(map);
+}
@@ -38,7 +38,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
 		struct mmap *map = &evlist->overwrite_mmap[i];
 		union perf_event *event;

-		perf_mmap__read_init(map);
+		perf_mmap__read_init(&map->core);
 		while ((event = perf_mmap__read_event(map)) != NULL) {
 			const u32 type = event->header.type;

@@ -185,7 +185,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
 		struct mmap *md;

 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -426,7 +426,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,

 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -39,7 +39,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
 	found = 0;
 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;
 		while ((event = perf_mmap__read_event(md)) != NULL) {
 			if (event->header.type == PERF_RECORD_COMM &&
@@ -114,7 +114,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
 	}

 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;

 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -93,7 +93,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
 		struct mmap *md;

 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -171,7 +171,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
 		struct mmap *md;

 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -100,7 +100,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
 	evlist__disable(evlist);

 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;

 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -270,7 +270,7 @@ static int process_events(struct evlist *evlist,

 	for (i = 0; i < evlist->core.nr_mmaps; i++) {
 		md = &evlist->mmap[i];
-		if (perf_mmap__read_init(md) < 0)
+		if (perf_mmap__read_init(&md->core) < 0)
 			continue;

 		while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -118,7 +118,7 @@ int test__task_exit(struct test *test __maybe_unused, int subtest __maybe_unused

 retry:
 	md = &evlist->mmap[0];
-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto out_init;

 	while ((event = perf_mmap__read_event(md)) != NULL) {
@@ -1809,7 +1809,7 @@ static void *perf_evlist__poll_thread(void *arg)
 			struct mmap *map = &evlist->mmap[i];
 			union perf_event *event;

-			if (perf_mmap__read_init(map))
+			if (perf_mmap__read_init(&map->core))
 				continue;
 			while ((event = perf_mmap__read_event(map)) != NULL) {
 				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
@@ -365,86 +365,6 @@ int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, int cpu)
 	return perf_mmap__aio_mmap(map, mp);
 }

-static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
-{
-	struct perf_event_header *pheader;
-	u64 evt_head = *start;
-	int size = mask + 1;
-
-	pr_debug2("%s: buf=%p, start=%"PRIx64"\n", __func__, buf, *start);
-	pheader = (struct perf_event_header *)(buf + (*start & mask));
-	while (true) {
-		if (evt_head - *start >= (unsigned int)size) {
-			pr_debug("Finished reading overwrite ring buffer: rewind\n");
-			if (evt_head - *start > (unsigned int)size)
-				evt_head -= pheader->size;
-			*end = evt_head;
-			return 0;
-		}
-
-		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
-
-		if (pheader->size == 0) {
-			pr_debug("Finished reading overwrite ring buffer: get start\n");
-			*end = evt_head;
-			return 0;
-		}
-
-		evt_head += pheader->size;
-		pr_debug3("move evt_head: %"PRIx64"\n", evt_head);
-	}
-	WARN_ONCE(1, "Shouldn't get here\n");
-	return -1;
-}
-
-/*
- * Report the start and end of the available data in ringbuffer
- */
-static int __perf_mmap__read_init(struct mmap *md)
-{
-	u64 head = perf_mmap__read_head(&md->core);
-	u64 old = md->core.prev;
-	unsigned char *data = md->core.base + page_size;
-	unsigned long size;
-
-	md->core.start = md->core.overwrite ? head : old;
-	md->core.end = md->core.overwrite ? old : head;
-
-	if ((md->core.end - md->core.start) < md->core.flush)
-		return -EAGAIN;
-
-	size = md->core.end - md->core.start;
-	if (size > (unsigned long)(md->core.mask) + 1) {
-		if (!md->core.overwrite) {
-			WARN_ONCE(1, "failed to keep up with mmap data. (warn only once)\n");
-
-			md->core.prev = head;
-			perf_mmap__consume(&md->core);
-			return -EAGAIN;
-		}
-
-		/*
-		 * Backward ring buffer is full. We still have a chance to read
-		 * most of data from it.
-		 */
-		if (overwrite_rb_find_range(data, md->core.mask, &md->core.start, &md->core.end))
-			return -EINVAL;
-	}
-
-	return 0;
-}
-
-int perf_mmap__read_init(struct mmap *map)
-{
-	/*
-	 * Check if event was unmapped due to a POLLHUP/POLLERR.
-	 */
-	if (!refcount_read(&map->core.refcnt))
-		return -ENOENT;
-
-	return __perf_mmap__read_init(map);
-}
-
 int perf_mmap__push(struct mmap *md, void *to,
 		    int push(struct mmap *map, void *to, void *buf, size_t size))
 {
@@ -454,7 +374,7 @@ int perf_mmap__push(struct mmap *md, void *to,
 	void *buf;
 	int rc = 0;

-	rc = perf_mmap__read_init(md);
+	rc = perf_mmap__read_init(&md->core);
 	if (rc < 0)
 		return (rc == -EAGAIN) ? 1 : -1;

@@ -54,6 +54,5 @@ int perf_mmap__push(struct mmap *md, void *to,

 size_t mmap__mmap_len(struct mmap *map);

-int perf_mmap__read_init(struct mmap *md);
 void perf_mmap__read_done(struct mmap *map);
 #endif /*__PERF_MMAP_H */
@@ -1023,7 +1023,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist,
 	if (!md)
 		return NULL;

-	if (perf_mmap__read_init(md) < 0)
+	if (perf_mmap__read_init(&md->core) < 0)
 		goto end;

 	event = perf_mmap__read_event(md);