libperf: Adopt perf_mmap__read_done() from tools/perf

Move perf_mmap__read_done() from tools/perf to libperf and export it in
the perf/mmap.h header.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lore.kernel.org/lkml/20191007125344.14268-12-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Jiri Olsa 2019-10-07 14:53:19 +02:00 committed by Arnaldo Carvalho de Melo
parent 7c4d41824f
commit 32fdc2ca7e
20 changed files with 34 additions and 33 deletions
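The caller hunks below all make the same mechanical substitution: tools/perf code stops passing its local struct mmap wrapper and instead passes the libperf struct perf_mmap embedded in it as the core member (md/map being the local variable names used at these call sites):

	perf_mmap__read_done(md);          /* old: tools/perf struct mmap reader */
	perf_mmap__read_done(&md->core);   /* new: exported libperf reader */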

@@ -142,7 +142,7 @@ int test__perf_time_to_tsc(struct test *test __maybe_unused, int subtest __maybe
next_event:
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
if (!comm1_time || !comm2_time)

@@ -794,7 +794,7 @@ static s64 perf_kvm__mmap_read_idx(struct perf_kvm_stat *kvm, int idx,
break;
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
return n;
}

@@ -894,7 +894,7 @@ static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
}
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
static void perf_top__mmap_read(struct perf_top *top)

@@ -3821,7 +3821,7 @@ again:
draining = true;
}
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
if (trace->nr_events == before) {

@@ -8,5 +8,6 @@ struct perf_mmap;
LIBPERF_API void perf_mmap__consume(struct perf_mmap *map);
LIBPERF_API int perf_mmap__read_init(struct perf_mmap *map);
LIBPERF_API void perf_mmap__read_done(struct perf_mmap *map);
#endif /* __LIBPERF_MMAP_H */

@@ -42,6 +42,7 @@ LIBPERF_0.0.1 {
perf_evlist__poll;
perf_mmap__consume;
perf_mmap__read_init;
perf_mmap__read_done;
local:
*;
};

@@ -175,3 +175,20 @@ int perf_mmap__read_init(struct perf_mmap *map)
return __perf_mmap__read_init(map);
}
/*
* Mandatory for overwrite mode
* The direction of overwrite mode is backward.
* The last perf_mmap__read() will set tail to map->core.prev.
* Need to correct the map->core.prev to head which is the end of next read.
*/
void perf_mmap__read_done(struct perf_mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&map->refcnt))
return;
map->prev = perf_mmap__read_head(map);
}
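With perf_mmap__read_init(), perf_mmap__consume() and now perf_mmap__read_done() all exported, libperf covers the whole per-ringbuffer read cycle. A minimal sketch of that cycle is below (the helper name is illustrative); it assumes a struct perf_mmap * already set up by the caller (in tools/perf, &md->core) and uses perf_mmap__read_event(), which this series adopts into libperf in a follow-up patch:

	#include <perf/mmap.h>
	#include <perf/event.h>

	/* Illustrative helper: drain every event currently queued in one ring buffer. */
	static void drain_one_mmap(struct perf_mmap *map)
	{
		union perf_event *event;

		/* Snapshot head/tail; returns -EAGAIN when there is nothing new to read. */
		if (perf_mmap__read_init(map) < 0)
			return;

		while ((event = perf_mmap__read_event(map)) != NULL) {
			/* ... handle event->header.type here ... */
			perf_mmap__consume(map);
		}

		/*
		 * Required for overwrite mode: reset map->prev to head so the
		 * next perf_mmap__read_init() starts from the right place.
		 */
		perf_mmap__read_done(map);
	}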

@@ -54,7 +54,7 @@ static int count_samples(struct evlist *evlist, int *sample_count,
return TEST_FAIL;
}
}
perf_mmap__read_done(map);
perf_mmap__read_done(&map->core);
}
return TEST_OK;
}

@@ -194,7 +194,7 @@ static int do_test(struct bpf_object *obj, int (*func)(void),
if (type == PERF_RECORD_SAMPLE)
count ++;
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
if (count != expect) {

@@ -435,7 +435,7 @@ static int process_events(struct machine *machine, struct evlist *evlist,
if (ret < 0)
return ret;
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
return 0;
}

@@ -49,7 +49,7 @@ static int find_comm(struct evlist *evlist, const char *comm)
found += 1;
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
return found;
}

@@ -142,7 +142,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse
nr_events[evsel->idx]++;
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
out_init:
err = 0;

@@ -124,7 +124,7 @@ int test__syscall_openat_tp_fields(struct test *test __maybe_unused, int subtest
goto out_ok;
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
if (nr_events == before)

@@ -279,7 +279,7 @@ int test__PERF_RECORD(struct test *test __maybe_unused, int subtest __maybe_unus
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
/*

@@ -120,7 +120,7 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
next_event:
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
out_init:
if ((u64) nr_samples == total_periods) {

@@ -280,7 +280,7 @@ static int process_events(struct evlist *evlist,
if (ret < 0)
goto out_free_nodes;
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
}
events_array = calloc(cnt, sizeof(struct event_node));

@@ -127,7 +127,7 @@ retry:
perf_mmap__consume(&md->core);
}
perf_mmap__read_done(md);
perf_mmap__read_done(&md->core);
out_init:
if (!exited || !nr_exit) {

@@ -1822,7 +1822,7 @@ static void *perf_evlist__poll_thread(void *arg)
perf_mmap__consume(&map->core);
got_data = true;
}
perf_mmap__read_done(map);
perf_mmap__read_done(&map->core);
}
if (draining && !got_data)

@@ -405,20 +405,3 @@ int perf_mmap__push(struct mmap *md, void *to,
out:
return rc;
}
/*
* Mandatory for overwrite mode
* The direction of overwrite mode is backward.
* The last perf_mmap__read() will set tail to map->core.prev.
* Need to correct the map->core.prev to head which is the end of next read.
*/
void perf_mmap__read_done(struct mmap *map)
{
/*
* Check if event was unmapped due to a POLLHUP/POLLERR.
*/
if (!refcount_read(&map->core.refcnt))
return;
map->core.prev = perf_mmap__read_head(&map->core);
}

@@ -54,5 +54,4 @@ int perf_mmap__push(struct mmap *md, void *to,
size_t mmap__mmap_len(struct mmap *map);
void perf_mmap__read_done(struct mmap *map);
#endif /*__PERF_MMAP_H */