bpf: decouple stack_map_get_build_id_offset() from perf_callchain_entry
Change stack_map_get_build_id_offset(), which is used to convert stack trace IP addresses into build ID+offset pairs. Right now this function accepts an array of u64s as input and uses an array of struct bpf_stack_build_id as output. This is problematic because the u64 array comes from perf_callchain_entry, which is (non-sleepable) RCU protected, so once we allow sleepable build ID fetching, this all breaks down.

But it's actually pretty easy to make stack_map_get_build_id_offset() work with an array of struct bpf_stack_build_id as both input and output, which is what this patch does, eliminating the dependency on perf_callchain_entry. We require the caller to fill out the bpf_stack_build_id.ip fields (all other fields can be left uninitialized), and we update the entries in place as we do build ID resolution.

We make sure to READ_ONCE() and cache locally the current IP value, as we use it in a few places to find the matching VMA and so on. Given this data is directly accessible and modifiable by the user's BPF code, we should make sure to have a consistent view of it.

Reviewed-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/r/20240829174232.3133883-9-andrii@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
This commit is contained in:
parent cdbb44f9a7
commit 4f4c4fc015
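To make the diff below easier to follow, here is a minimal caller-side sketch of the new contract (variable names such as id_offs, ips, trace_nr and the backing buffer are illustrative, not copied from a single call site): the caller pre-fills only the .ip fields, then the function resolves build IDs in place.

	/* Sketch of the new calling convention, mirroring the callers changed below.
	 * Only the .ip field of each entry must be initialized up front. */
	struct bpf_stack_build_id *id_offs = buf;	/* caller-owned output buffer */
	u32 i;

	for (i = 0; i < trace_nr; i++)
		id_offs[i].ip = ips[i];

	stack_map_get_build_id_offset(id_offs, trace_nr, user);

	/* Afterwards each entry is either:
	 *  - BPF_STACK_BUILD_ID_VALID: build_id and file offset filled in, or
	 *  - BPF_STACK_BUILD_ID_IP: build_id zeroed, original IP preserved.
	 */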
@@ -124,8 +124,18 @@ free_smap:
 	return ERR_PTR(err);
 }
 
+/*
+ * Expects all id_offs[i].ip values to be set to correct initial IPs.
+ * They will be subsequently:
+ *   - either adjusted in place to a file offset, if build ID fetching
+ *     succeeds; in this case id_offs[i].build_id is set to correct build ID,
+ *     and id_offs[i].status is set to BPF_STACK_BUILD_ID_VALID;
+ *   - or IP will be kept intact, if build ID fetching failed; in this case
+ *     id_offs[i].build_id is zeroed out and id_offs[i].status is set to
+ *     BPF_STACK_BUILD_ID_IP.
+ */
 static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
-					  u64 *ips, u32 trace_nr, bool user)
+					  u32 trace_nr, bool user)
 {
 	int i;
 	struct mmap_unlock_irq_work *work = NULL;
@@ -142,30 +152,28 @@ static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 		/* cannot access current->mm, fall back to ips */
 		for (i = 0; i < trace_nr; i++) {
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
-			id_offs[i].ip = ips[i];
 			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
 		}
 		return;
 	}
 
 	for (i = 0; i < trace_nr; i++) {
-		if (range_in_vma(prev_vma, ips[i], ips[i])) {
+		u64 ip = READ_ONCE(id_offs[i].ip);
+
+		if (range_in_vma(prev_vma, ip, ip)) {
 			vma = prev_vma;
-			memcpy(id_offs[i].build_id, prev_build_id,
-			       BUILD_ID_SIZE_MAX);
+			memcpy(id_offs[i].build_id, prev_build_id, BUILD_ID_SIZE_MAX);
 			goto build_id_valid;
 		}
-		vma = find_vma(current->mm, ips[i]);
+		vma = find_vma(current->mm, ip);
 		if (!vma || build_id_parse_nofault(vma, id_offs[i].build_id, NULL)) {
 			/* per entry fall back to ips */
 			id_offs[i].status = BPF_STACK_BUILD_ID_IP;
-			id_offs[i].ip = ips[i];
 			memset(id_offs[i].build_id, 0, BUILD_ID_SIZE_MAX);
 			continue;
 		}
 build_id_valid:
-		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ips[i]
-			- vma->vm_start;
+		id_offs[i].offset = (vma->vm_pgoff << PAGE_SHIFT) + ip - vma->vm_start;
 		id_offs[i].status = BPF_STACK_BUILD_ID_VALID;
 		prev_vma = vma;
 		prev_build_id = id_offs[i].build_id;
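The READ_ONCE() in the hunk above exists because id_offs points into map memory that the user's BPF code can modify concurrently; the IP is therefore snapshotted once, and the VMA cache check, find_vma() lookup, and offset computation all use that cached copy. A standalone user-space sketch of the same "read the shared field once, then only use the local copy" pattern (plain C11 atomics stand in for the kernel's READ_ONCE; all names and addresses are made up):

	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct entry {
		_Atomic uint64_t ip;	/* stands in for bpf_stack_build_id.ip */
		uint64_t offset;
	};

	static int in_range(uint64_t lo, uint64_t hi, uint64_t v)
	{
		return v >= lo && v < hi;
	}

	static void resolve(struct entry *e, uint64_t vma_start, uint64_t vma_end)
	{
		/* One snapshot; the range check and the offset computation both use
		 * it, so a concurrent writer cannot make the two disagree. */
		uint64_t ip = atomic_load_explicit(&e->ip, memory_order_relaxed);

		if (in_range(vma_start, vma_end, ip))
			e->offset = ip - vma_start;
	}

	int main(void)
	{
		struct entry e = { .ip = 0x401234 };

		resolve(&e, 0x400000, 0x500000);
		printf("offset = 0x%llx\n", (unsigned long long)e.offset);
		return 0;
	}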
@@ -216,7 +224,7 @@ static long __bpf_get_stackid(struct bpf_map *map,
 	struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
 	struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
 	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
-	u32 hash, id, trace_nr, trace_len;
+	u32 hash, id, trace_nr, trace_len, i;
 	bool user = flags & BPF_F_USER_STACK;
 	u64 *ips;
 	bool hash_matches;
@@ -238,15 +246,18 @@ static long __bpf_get_stackid(struct bpf_map *map,
 		return id;
 
 	if (stack_map_use_build_id(map)) {
+		struct bpf_stack_build_id *id_offs;
+
 		/* for build_id+offset, pop a bucket before slow cmp */
 		new_bucket = (struct stack_map_bucket *)
 			pcpu_freelist_pop(&smap->freelist);
 		if (unlikely(!new_bucket))
 			return -ENOMEM;
 		new_bucket->nr = trace_nr;
-		stack_map_get_build_id_offset(
-			(struct bpf_stack_build_id *)new_bucket->data,
-			ips, trace_nr, user);
+		id_offs = (struct bpf_stack_build_id *)new_bucket->data;
+		for (i = 0; i < trace_nr; i++)
+			id_offs[i].ip = ips[i];
+		stack_map_get_build_id_offset(id_offs, trace_nr, user);
 		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
 		if (hash_matches && bucket->nr == trace_nr &&
 		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
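For context, this __bpf_get_stackid() path is reached from BPF programs that use a stack trace map created with BPF_F_STACK_BUILD_ID. An illustrative BPF-side program (not part of this patch; the map name, sizes, and kprobe target are arbitrary) that exercises it:

	#include <linux/bpf.h>
	#include <linux/ptrace.h>
	#include <bpf/bpf_helpers.h>

	#define MAX_STACK_DEPTH 127

	struct {
		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
		__uint(max_entries, 1024);
		__uint(key_size, sizeof(__u32));
		/* with BPF_F_STACK_BUILD_ID, value_size must be a multiple of
		 * sizeof(struct bpf_stack_build_id) */
		__uint(value_size, MAX_STACK_DEPTH * sizeof(struct bpf_stack_build_id));
		__uint(map_flags, BPF_F_STACK_BUILD_ID);
	} stack_traces SEC(".maps");

	SEC("kprobe/do_nanosleep")
	int collect_stack(struct pt_regs *ctx)
	{
		/* The value stored under the returned id is an array of
		 * struct bpf_stack_build_id, filled via the kernel path above. */
		long id = bpf_get_stackid(ctx, &stack_traces, BPF_F_USER_STACK);

		if (id < 0)
			bpf_printk("stackid failed: %ld", id);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";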
@@ -445,10 +456,16 @@ static long __bpf_get_stack(struct pt_regs *regs, struct task_struct *task,
 	copy_len = trace_nr * elem_size;
 
 	ips = trace->ip + skip;
-	if (user && user_build_id)
-		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
-	else
+	if (user && user_build_id) {
+		struct bpf_stack_build_id *id_offs = buf;
+		u32 i;
+
+		for (i = 0; i < trace_nr; i++)
+			id_offs[i].ip = ips[i];
+		stack_map_get_build_id_offset(buf, trace_nr, user);
+	} else {
 		memcpy(buf, ips, copy_len);
+	}
 
 	if (size > copy_len)
 		memset(buf + copy_len, 0, size - copy_len);
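The branch changed in this last hunk serves the bpf_get_stack() helper when BPF_F_USER_BUILD_ID is set: the caller-supplied buffer is then filled with struct bpf_stack_build_id entries instead of raw addresses. An illustrative BPF-side snippet (not part of this patch; the hook point and depth are arbitrary, and the buffer is kept small to fit the BPF stack):

	#include <linux/bpf.h>
	#include <linux/ptrace.h>
	#include <bpf/bpf_helpers.h>

	#define MAX_STACK_DEPTH 8

	SEC("kprobe/do_nanosleep")
	int dump_user_stack(struct pt_regs *ctx)
	{
		/* With BPF_F_USER_BUILD_ID, each entry is status + build_id +
		 * (file offset or raw ip), not a plain u64 address. */
		struct bpf_stack_build_id buf[MAX_STACK_DEPTH] = {};
		long len;

		len = bpf_get_stack(ctx, buf, sizeof(buf),
				    BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
		if (len < 0)
			return 0;

		/* len is in bytes; divide by sizeof(struct bpf_stack_build_id)
		 * to get the number of captured frames. */
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";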