Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-15 15:04:27 +08:00)
3677d0a131
To address the increasing fragmentation of the bpf loader programs, this commit refactors the existing kprobe tracing programs to use the libbpf bpf loader instead of bpf_load.o, which is used in samples/bpf.

- For kprobe events pointing to system calls, the SYSCALL() macro in trace_common.h was used.
- Adding a kprobe event and attaching a bpf program to it was done through bpf_program__attach().
- Instead of using the existing BPF MAP definition, the MAP definition has been refactored with the new BTF-defined MAP format.

Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200823085334.9413-3-danieltimlee@gmail.com
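To illustrate the user-space side of this refactor, the following is a minimal sketch of a libbpf-based loader using the open/load/attach flow the commit describes. The object file name "lathist_kern.o" and the overall error handling are assumptions for illustration; the sample's actual user program may differ.

	/* Minimal libbpf loader sketch replacing bpf_load.o; the object
	 * file name "lathist_kern.o" is an assumption for illustration. */
	#include <stdio.h>
	#include <bpf/libbpf.h>

	int main(void)
	{
		struct bpf_object *obj;
		struct bpf_program *prog;

		obj = bpf_object__open_file("lathist_kern.o", NULL);
		if (libbpf_get_error(obj)) {
			fprintf(stderr, "failed to open BPF object\n");
			return 1;
		}

		if (bpf_object__load(obj)) {
			fprintf(stderr, "failed to load BPF object\n");
			return 1;
		}

		/* bpf_program__attach() infers the attach point from each
		 * program's SEC() name, e.g. "kprobe/trace_preempt_off". */
		bpf_object__for_each_program(prog, obj) {
			if (libbpf_get_error(bpf_program__attach(prog))) {
				fprintf(stderr, "failed to attach program\n");
				return 1;
			}
		}

		/* ... read maps / report results here ... */
		bpf_object__close(obj);
		return 0;
	}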
100 lines
2.1 KiB
C
/* Copyright (c) 2013-2015 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2015 BMW Car IT GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/version.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_ENTRIES	20
#define MAX_CPU		4

/* We need to stick to static allocated memory (an array instead of
 * hash table) because managing dynamic memory from the
 * trace_preempt_[on|off] tracepoints hooks is not supported.
 */

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, u64);
	__uint(max_entries, MAX_CPU);
} my_map SEC(".maps");

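/* Runs when preemption is disabled: record the current timestamp in
 * this CPU's slot of my_map.
 */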
SEC("kprobe/trace_preempt_off")
|
|
int bpf_prog1(struct pt_regs *ctx)
|
|
{
|
|
int cpu = bpf_get_smp_processor_id();
|
|
u64 *ts = bpf_map_lookup_elem(&my_map, &cpu);
|
|
|
|
if (ts)
|
|
*ts = bpf_ktime_get_ns();
|
|
|
|
return 0;
|
|
}
|
|
|
|
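/* Branchless floor(log2(v)) for a 32-bit value: test successively
 * smaller bit ranges and accumulate the shift amounts into r.
 */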
static unsigned int log2(unsigned int v)
{
	unsigned int r;
	unsigned int shift;

	r = (v > 0xFFFF) << 4; v >>= r;
	shift = (v > 0xFF) << 3; v >>= shift; r |= shift;
	shift = (v > 0xF) << 2; v >>= shift; r |= shift;
	shift = (v > 0x3) << 1; v >>= shift; r |= shift;
	r |= (v >> 1);

	return r;
}

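/* floor(log2(v)) for a 64-bit value: use the high 32 bits if any are set. */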
static unsigned int log2l(unsigned long v)
{
	unsigned int hi = v >> 32;

	if (hi)
		return log2(hi) + 32;
	else
		return log2(v);
}

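/* Latency histogram: MAX_ENTRIES log2-scaled buckets per CPU, indexed
 * by cpu * MAX_ENTRIES + bucket.
 */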
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, long);
	__uint(max_entries, MAX_CPU * MAX_ENTRIES);
} my_lat SEC(".maps");

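/* Runs when preemption is re-enabled: compute how long preemption was
 * off on this CPU, map the delta to a log2 bucket (clamped to the last
 * bucket), and atomically bump that bucket's counter.
 */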
SEC("kprobe/trace_preempt_on")
|
|
int bpf_prog2(struct pt_regs *ctx)
|
|
{
|
|
u64 *ts, cur_ts, delta;
|
|
int key, cpu;
|
|
long *val;
|
|
|
|
cpu = bpf_get_smp_processor_id();
|
|
ts = bpf_map_lookup_elem(&my_map, &cpu);
|
|
if (!ts)
|
|
return 0;
|
|
|
|
cur_ts = bpf_ktime_get_ns();
|
|
delta = log2l(cur_ts - *ts);
|
|
|
|
if (delta > MAX_ENTRIES - 1)
|
|
delta = MAX_ENTRIES - 1;
|
|
|
|
key = cpu * MAX_ENTRIES + delta;
|
|
val = bpf_map_lookup_elem(&my_lat, &key);
|
|
if (val)
|
|
__sync_fetch_and_add((long *)val, 1);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;