
samples/bpf: move common-purpose trace functions to selftests

There is no functionality change in this patch. The common-purpose
trace functions, including perf_event polling and ksym lookup,
are moved from trace_output_user.c and bpf_load.c to
selftests/bpf/trace_helpers.c so that these functions can
be reused later in selftests.

Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Authored by Yonghong Song on 2018-04-28 22:28:13 -07:00; committed by Alexei Starovoitov
parent de2ff05f48
commit 28dbf861de
10 changed files with 223 additions and 175 deletions

samples/bpf/Makefile

@@ -49,6 +49,7 @@ hostprogs-y += xdp_adjust_tail
# Libbpf dependencies
LIBBPF := ../../tools/lib/bpf/bpf.o ../../tools/lib/bpf/nlattr.o
CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
+TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
test_lru_dist-objs := test_lru_dist.o $(LIBBPF)
sock_example-objs := sock_example.o $(LIBBPF)
@@ -65,10 +66,10 @@ tracex6-objs := bpf_load.o $(LIBBPF) tracex6_user.o
tracex7-objs := bpf_load.o $(LIBBPF) tracex7_user.o
load_sock_ops-objs := bpf_load.o $(LIBBPF) load_sock_ops.o
test_probe_write_user-objs := bpf_load.o $(LIBBPF) test_probe_write_user_user.o
-trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o
+trace_output-objs := bpf_load.o $(LIBBPF) trace_output_user.o $(TRACE_HELPERS)
lathist-objs := bpf_load.o $(LIBBPF) lathist_user.o
-offwaketime-objs := bpf_load.o $(LIBBPF) offwaketime_user.o
-spintest-objs := bpf_load.o $(LIBBPF) spintest_user.o
+offwaketime-objs := bpf_load.o $(LIBBPF) offwaketime_user.o $(TRACE_HELPERS)
+spintest-objs := bpf_load.o $(LIBBPF) spintest_user.o $(TRACE_HELPERS)
map_perf_test-objs := bpf_load.o $(LIBBPF) map_perf_test_user.o
test_overhead-objs := bpf_load.o $(LIBBPF) test_overhead_user.o
test_cgrp2_array_pin-objs := $(LIBBPF) test_cgrp2_array_pin.o
@@ -82,8 +83,8 @@ xdp2-objs := bpf_load.o $(LIBBPF) xdp1_user.o
xdp_router_ipv4-objs := bpf_load.o $(LIBBPF) xdp_router_ipv4_user.o
test_current_task_under_cgroup-objs := bpf_load.o $(LIBBPF) $(CGROUP_HELPERS) \
test_current_task_under_cgroup_user.o
-trace_event-objs := bpf_load.o $(LIBBPF) trace_event_user.o
-sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o
+trace_event-objs := bpf_load.o $(LIBBPF) trace_event_user.o $(TRACE_HELPERS)
+sampleip-objs := bpf_load.o $(LIBBPF) sampleip_user.o $(TRACE_HELPERS)
tc_l2_redirect-objs := bpf_load.o $(LIBBPF) tc_l2_redirect_user.o
lwt_len_hist-objs := bpf_load.o $(LIBBPF) lwt_len_hist_user.o
xdp_tx_iptunnel-objs := bpf_load.o $(LIBBPF) xdp_tx_iptunnel_user.o

samples/bpf/bpf_load.c

@@ -648,66 +648,3 @@ void read_trace_pipe(void)
}
}
}
#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
static int ksym_cmp(const void *p1, const void *p2)
{
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
int load_kallsyms(void)
{
FILE *f = fopen("/proc/kallsyms", "r");
char func[256], buf[256];
char symbol;
void *addr;
int i = 0;
if (!f)
return -ENOENT;
while (!feof(f)) {
if (!fgets(buf, sizeof(buf), f))
break;
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
if (!addr)
continue;
syms[i].addr = (long) addr;
syms[i].name = strdup(func);
i++;
}
sym_cnt = i;
qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
return 0;
}
struct ksym *ksym_search(long key)
{
int start = 0, end = sym_cnt;
int result;
while (start < end) {
size_t mid = start + (end - start) / 2;
result = key - syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
return &syms[mid];
}
if (start >= 1 && syms[start - 1].addr < key &&
key < syms[start].addr)
/* valid ksym */
return &syms[start - 1];
/* out of range. return _stext */
return &syms[0];
}

samples/bpf/bpf_load.h

@@ -54,12 +54,5 @@ int load_bpf_file(char *path);
int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map);
void read_trace_pipe(void);
struct ksym {
long addr;
char *name;
};
int load_kallsyms(void);
struct ksym *ksym_search(long key);
int bpf_set_link_xdp_fd(int ifindex, int fd, __u32 flags);
#endif

samples/bpf/offwaketime_user.c

@@ -17,6 +17,7 @@
#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "trace_helpers.h"
#define PRINT_RAW_ADDR 0

samples/bpf/sampleip_user.c

@@ -22,6 +22,7 @@
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"
#define DEFAULT_FREQ 99
#define DEFAULT_SECS 5

samples/bpf/spintest_user.c

@@ -7,6 +7,7 @@
#include <sys/resource.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "trace_helpers.h"
int main(int ac, char **argv)
{

samples/bpf/trace_event_user.c

@@ -21,6 +21,7 @@
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"
#define SAMPLE_FREQ 50

samples/bpf/trace_output_user.c

@@ -21,100 +21,10 @@
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"
#include "trace_helpers.h"
static int pmu_fd;
int page_size;
int page_cnt = 8;
volatile struct perf_event_mmap_page *header;
typedef void (*print_fn)(void *data, int size);
static int perf_event_mmap(int fd)
{
void *base;
int mmap_size;
page_size = getpagesize();
mmap_size = page_size * (page_cnt + 1);
base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (base == MAP_FAILED) {
printf("mmap err\n");
return -1;
}
header = base;
return 0;
}
static int perf_event_poll(int fd)
{
struct pollfd pfd = { .fd = fd, .events = POLLIN };
return poll(&pfd, 1, 1000);
}
struct perf_event_sample {
struct perf_event_header header;
__u32 size;
char data[];
};
static void perf_event_read(print_fn fn)
{
__u64 data_tail = header->data_tail;
__u64 data_head = header->data_head;
__u64 buffer_size = page_cnt * page_size;
void *base, *begin, *end;
char buf[256];
asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
if (data_head == data_tail)
return;
base = ((char *)header) + page_size;
begin = base + data_tail % buffer_size;
end = base + data_head % buffer_size;
while (begin != end) {
struct perf_event_sample *e;
e = begin;
if (begin + e->header.size > base + buffer_size) {
long len = base + buffer_size - begin;
assert(len < e->header.size);
memcpy(buf, begin, len);
memcpy(buf + len, base, e->header.size - len);
e = (void *) buf;
begin = base + e->header.size - len;
} else if (begin + e->header.size == base + buffer_size) {
begin = base;
} else {
begin += e->header.size;
}
if (e->header.type == PERF_RECORD_SAMPLE) {
fn(e->data, e->size);
} else if (e->header.type == PERF_RECORD_LOST) {
struct {
struct perf_event_header header;
__u64 id;
__u64 lost;
} *lost = (void *) e;
printf("lost %lld events\n", lost->lost);
} else {
printf("unknown event type=%d size=%d\n",
e->header.type, e->header.size);
}
}
__sync_synchronize(); /* smp_mb() */
header->data_tail = data_head;
}
static __u64 time_get_ns(void)
{
struct timespec ts;
@@ -127,7 +37,7 @@ static __u64 start_time;
#define MAX_CNT 100000ll
-static void print_bpf_output(void *data, int size)
+static int print_bpf_output(void *data, int size)
{
static __u64 cnt;
struct {
@@ -138,7 +48,7 @@ static void print_bpf_output(void *data, int size)
if (e->cookie != 0x12345678) {
printf("BUG pid %llx cookie %llx sized %d\n",
e->pid, e->cookie, size);
-kill(0, SIGINT);
+return PERF_EVENT_ERROR;
}
cnt++;
@@ -146,8 +56,10 @@ static void print_bpf_output(void *data, int size)
if (cnt == MAX_CNT) {
printf("recv %lld events per sec\n",
MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
-kill(0, SIGINT);
+return PERF_EVENT_DONE;
}
+return PERF_EVENT_CONT;
}
static void test_bpf_perf_event(void)
@@ -170,6 +82,7 @@ int main(int argc, char **argv)
{
char filename[256];
FILE *f;
+int ret;
snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
@@ -187,10 +100,7 @@ int main(int argc, char **argv)
(void) f;
start_time = time_get_ns();
-for (;;) {
-perf_event_poll(pmu_fd);
-perf_event_read(print_bpf_output);
-}
-return 0;
+ret = perf_event_poller(pmu_fd, print_bpf_output);
+kill(0, SIGINT);
+return ret;
}

tools/testing/selftests/bpf/trace_helpers.c (new file)

@@ -0,0 +1,180 @@
// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/mman.h>
#include "trace_helpers.h"
#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
static int ksym_cmp(const void *p1, const void *p2)
{
return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
int load_kallsyms(void)
{
FILE *f = fopen("/proc/kallsyms", "r");
char func[256], buf[256];
char symbol;
void *addr;
int i = 0;
if (!f)
return -ENOENT;
while (!feof(f)) {
if (!fgets(buf, sizeof(buf), f))
break;
if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
break;
if (!addr)
continue;
syms[i].addr = (long) addr;
syms[i].name = strdup(func);
i++;
}
sym_cnt = i;
qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
return 0;
}
struct ksym *ksym_search(long key)
{
int start = 0, end = sym_cnt;
int result;
while (start < end) {
size_t mid = start + (end - start) / 2;
result = key - syms[mid].addr;
if (result < 0)
end = mid;
else if (result > 0)
start = mid + 1;
else
return &syms[mid];
}
if (start >= 1 && syms[start - 1].addr < key &&
key < syms[start].addr)
/* valid ksym */
return &syms[start - 1];
/* out of range. return _stext */
return &syms[0];
}
static int page_size;
static int page_cnt = 8;
static volatile struct perf_event_mmap_page *header;
int perf_event_mmap(int fd)
{
void *base;
int mmap_size;
page_size = getpagesize();
mmap_size = page_size * (page_cnt + 1);
base = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
if (base == MAP_FAILED) {
printf("mmap err\n");
return -1;
}
header = base;
return 0;
}
static int perf_event_poll(int fd)
{
struct pollfd pfd = { .fd = fd, .events = POLLIN };
return poll(&pfd, 1, 1000);
}
struct perf_event_sample {
struct perf_event_header header;
__u32 size;
char data[];
};
static int perf_event_read(perf_event_print_fn fn)
{
__u64 data_tail = header->data_tail;
__u64 data_head = header->data_head;
__u64 buffer_size = page_cnt * page_size;
void *base, *begin, *end;
char buf[256];
int ret;
asm volatile("" ::: "memory"); /* in real code it should be smp_rmb() */
if (data_head == data_tail)
return PERF_EVENT_CONT;
base = ((char *)header) + page_size;
begin = base + data_tail % buffer_size;
end = base + data_head % buffer_size;
while (begin != end) {
struct perf_event_sample *e;
e = begin;
if (begin + e->header.size > base + buffer_size) {
long len = base + buffer_size - begin;
assert(len < e->header.size);
memcpy(buf, begin, len);
memcpy(buf + len, base, e->header.size - len);
e = (void *) buf;
begin = base + e->header.size - len;
} else if (begin + e->header.size == base + buffer_size) {
begin = base;
} else {
begin += e->header.size;
}
if (e->header.type == PERF_RECORD_SAMPLE) {
ret = fn(e->data, e->size);
if (ret != PERF_EVENT_CONT)
return ret;
} else if (e->header.type == PERF_RECORD_LOST) {
struct {
struct perf_event_header header;
__u64 id;
__u64 lost;
} *lost = (void *) e;
printf("lost %lld events\n", lost->lost);
} else {
printf("unknown event type=%d size=%d\n",
e->header.type, e->header.size);
}
}
__sync_synchronize(); /* smp_mb() */
header->data_tail = data_head;
return PERF_EVENT_CONT;
}
int perf_event_poller(int fd, perf_event_print_fn output_fn)
{
int ret;
for (;;) {
perf_event_poll(fd);
ret = perf_event_read(output_fn);
if (ret != PERF_EVENT_CONT)
return ret;
}
return PERF_EVENT_DONE;
}

tools/testing/selftests/bpf/trace_helpers.h (new file)

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __TRACE_HELPER_H
#define __TRACE_HELPER_H
struct ksym {
long addr;
char *name;
};
int load_kallsyms(void);
struct ksym *ksym_search(long key);
typedef int (*perf_event_print_fn)(void *data, int size);
/* return code for perf_event_print_fn */
#define PERF_EVENT_DONE 0
#define PERF_EVENT_ERROR -1
#define PERF_EVENT_CONT -2
int perf_event_mmap(int fd);
/* return PERF_EVENT_DONE or PERF_EVENT_ERROR */
int perf_event_poller(int fd, perf_event_print_fn output_fn);
#endif