linux/tools/bpf/bpftool/common.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */
#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <ftw.h>
#include <libgen.h>
#include <mntent.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/mount.h>
Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK" This reverts commit a777e18f1bcd32528ff5dfd10a6629b655b05eb8. In commit a777e18f1bcd ("bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"), we removed the rlimit bump in bpftool, because the kernel has switched to memcg-based memory accounting. Thanks to the LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, we attempted to keep compatibility with other systems and ask libbpf to raise the limit for us if necessary. How do we know if memcg-based accounting is supported? There is a probe in libbpf to check this. But this probe currently relies on the availability of a given BPF helper, bpf_ktime_get_coarse_ns(), which landed in the same kernel version as the memory accounting change. This works in the generic case, but it may fail, for example, if the helper function has been backported to an older kernel. This has been observed for Google Cloud's Container-Optimized OS (COS), where the helper is available but rlimit is still in use. The probe succeeds, the rlimit is not raised, and probing features with bpftool, for example, fails. A patch was submitted [0] to update this probe in libbpf, based on what the cilium/ebpf Go library does [1]. It would lower the soft rlimit to 0, attempt to load a BPF object, and reset the rlimit. But it may induce some hard-to-debug flakiness if another process starts, or the current application is killed, while the rlimit is reduced, and the approach was discarded. As a workaround to ensure that the rlimit bump does not depend on the availability of a given helper, we restore the unconditional rlimit bump in bpftool for now. [0] https://lore.kernel.org/bpf/20220609143614.97837-1-quentin@isovalent.com/ [1] https://github.com/cilium/ebpf/blob/v0.9.0/rlimit/rlimit.go#L39 Signed-off-by: Quentin Monnet <quentin@isovalent.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Yafang Shao <laoar.shao@gmail.com> Cc: Stanislav Fomichev <sdf@google.com> Link: https://lore.kernel.org/bpf/20220610112648.29695-2-quentin@isovalent.com
2022-06-10 19:26:47 +08:00
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/magic.h>
#include <linux/unistd.h>
#include <bpf/bpf.h>
#include <bpf/hashmap.h>
#include <bpf/libbpf.h> /* libbpf_num_possible_cpus */
#include <bpf/btf.h>
#include "main.h"
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif
void p_err(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (json_output) {
jsonw_start_object(json_wtr);
jsonw_name(json_wtr, "error");
jsonw_vprintf_enquote(json_wtr, fmt, ap);
jsonw_end_object(json_wtr);
} else {
fprintf(stderr, "Error: ");
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
}
va_end(ap);
}
void p_info(const char *fmt, ...)
{
va_list ap;
if (json_output)
return;
va_start(ap, fmt);
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
va_end(ap);
}
static bool is_bpffs(const char *path)
{
struct statfs st_fs;
if (statfs(path, &st_fs) < 0)
return false;
return (unsigned long)st_fs.f_type == BPF_FS_MAGIC;
}
/* Probe whether the kernel has switched from memlock-based (RLIMIT_MEMLOCK) to
* memcg-based memory accounting for BPF maps and programs. This was done in
* commit 97306be45fbe ("Merge branch 'switch to memcg-based memory
* accounting'"), in Linux 5.11.
*
* Libbpf also offers to probe for memcg-based accounting vs rlimit, but does
* so by checking for the availability of a given BPF helper and this has
* failed on some kernels with backports in the past, see commit 6b4384ff1088
* ("Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"").
* Instead, we can probe by lowering the process-based rlimit to 0, trying to
* load a BPF object, and resetting the rlimit. If the load succeeds then
* memcg-based accounting is supported.
*
* This would be too dangerous to do in the library, because multithreaded
* applications might attempt to load items while the rlimit is at 0. Given
* that bpftool is single-threaded, this is fine to do here.
*/
static bool known_to_need_rlimit(void)
{
struct rlimit rlim_init, rlim_cur_zero = {};
struct bpf_insn insns[] = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
};
size_t insn_cnt = ARRAY_SIZE(insns);
union bpf_attr attr;
int prog_fd, err;
memset(&attr, 0, sizeof(attr));
attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
attr.insns = ptr_to_u64(insns);
attr.insn_cnt = insn_cnt;
attr.license = ptr_to_u64("GPL");
if (getrlimit(RLIMIT_MEMLOCK, &rlim_init))
return false;
/* Drop the soft limit to zero. We keep the hard limit at its
* current value, because lowering it would be a permanent operation
* for unprivileged users.
*/
rlim_cur_zero.rlim_max = rlim_init.rlim_max;
if (setrlimit(RLIMIT_MEMLOCK, &rlim_cur_zero))
return false;
/* Do not use bpf_prog_load() from libbpf here, because it calls
* bump_rlimit_memlock(), interfering with the current probe.
*/
prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
err = errno;
/* reset soft rlimit to its initial value */
setrlimit(RLIMIT_MEMLOCK, &rlim_init);
if (prog_fd < 0)
return err == EPERM;
close(prog_fd);
return false;
}
Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK" This reverts commit a777e18f1bcd32528ff5dfd10a6629b655b05eb8. In commit a777e18f1bcd ("bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"), we removed the rlimit bump in bpftool, because the kernel has switched to memcg-based memory accounting. Thanks to the LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, we attempted to keep compatibility with other systems and ask libbpf to raise the limit for us if necessary. How do we know if memcg-based accounting is supported? There is a probe in libbpf to check this. But this probe currently relies on the availability of a given BPF helper, bpf_ktime_get_coarse_ns(), which landed in the same kernel version as the memory accounting change. This works in the generic case, but it may fail, for example, if the helper function has been backported to an older kernel. This has been observed for Google Cloud's Container-Optimized OS (COS), where the helper is available but rlimit is still in use. The probe succeeds, the rlimit is not raised, and probing features with bpftool, for example, fails. A patch was submitted [0] to update this probe in libbpf, based on what the cilium/ebpf Go library does [1]. It would lower the soft rlimit to 0, attempt to load a BPF object, and reset the rlimit. But it may induce some hard-to-debug flakiness if another process starts, or the current application is killed, while the rlimit is reduced, and the approach was discarded. As a workaround to ensure that the rlimit bump does not depend on the availability of a given helper, we restore the unconditional rlimit bump in bpftool for now. [0] https://lore.kernel.org/bpf/20220609143614.97837-1-quentin@isovalent.com/ [1] https://github.com/cilium/ebpf/blob/v0.9.0/rlimit/rlimit.go#L39 Signed-off-by: Quentin Monnet <quentin@isovalent.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Yafang Shao <laoar.shao@gmail.com> Cc: Stanislav Fomichev <sdf@google.com> Link: https://lore.kernel.org/bpf/20220610112648.29695-2-quentin@isovalent.com
2022-06-10 19:26:47 +08:00
void set_max_rlimit(void)
{
struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
if (known_to_need_rlimit())
setrlimit(RLIMIT_MEMLOCK, &rinf);
Revert "bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK" This reverts commit a777e18f1bcd32528ff5dfd10a6629b655b05eb8. In commit a777e18f1bcd ("bpftool: Use libbpf 1.0 API mode instead of RLIMIT_MEMLOCK"), we removed the rlimit bump in bpftool, because the kernel has switched to memcg-based memory accounting. Thanks to the LIBBPF_STRICT_AUTO_RLIMIT_MEMLOCK, we attempted to keep compatibility with other systems and ask libbpf to raise the limit for us if necessary. How do we know if memcg-based accounting is supported? There is a probe in libbpf to check this. But this probe currently relies on the availability of a given BPF helper, bpf_ktime_get_coarse_ns(), which landed in the same kernel version as the memory accounting change. This works in the generic case, but it may fail, for example, if the helper function has been backported to an older kernel. This has been observed for Google Cloud's Container-Optimized OS (COS), where the helper is available but rlimit is still in use. The probe succeeds, the rlimit is not raised, and probing features with bpftool, for example, fails. A patch was submitted [0] to update this probe in libbpf, based on what the cilium/ebpf Go library does [1]. It would lower the soft rlimit to 0, attempt to load a BPF object, and reset the rlimit. But it may induce some hard-to-debug flakiness if another process starts, or the current application is killed, while the rlimit is reduced, and the approach was discarded. As a workaround to ensure that the rlimit bump does not depend on the availability of a given helper, we restore the unconditional rlimit bump in bpftool for now. [0] https://lore.kernel.org/bpf/20220609143614.97837-1-quentin@isovalent.com/ [1] https://github.com/cilium/ebpf/blob/v0.9.0/rlimit/rlimit.go#L39 Signed-off-by: Quentin Monnet <quentin@isovalent.com> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Cc: Yafang Shao <laoar.shao@gmail.com> Cc: Stanislav Fomichev <sdf@google.com> Link: https://lore.kernel.org/bpf/20220610112648.29695-2-quentin@isovalent.com
2022-06-10 19:26:47 +08:00
}
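
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): the intent is to call set_max_rlimit() once, before any map
 * is created or program loaded, so that allocations charged against
 * RLIMIT_MEMLOCK on pre-5.11 kernels do not fail with EPERM.
 */
static __maybe_unused void example_prepare_for_bpf_load(void)
{
	set_max_rlimit();
	/* ... then open/load BPF objects, create maps, etc. */
}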
static int
mnt_fs(const char *target, const char *type, char *buff, size_t bufflen)
{
bool bind_done = false;
while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
if (errno != EINVAL || bind_done) {
snprintf(buff, bufflen,
"mount --make-private %s failed: %s",
target, strerror(errno));
return -1;
}
if (mount(target, target, "none", MS_BIND, NULL)) {
snprintf(buff, bufflen,
"mount --bind %s %s failed: %s",
target, target, strerror(errno));
return -1;
}
bind_done = true;
}
if (mount(type, target, type, 0, "mode=0700")) {
snprintf(buff, bufflen, "mount -t %s %s %s failed: %s",
type, type, target, strerror(errno));
return -1;
}
return 0;
}
int mount_tracefs(const char *target)
{
char err_str[ERR_MAX_LEN];
int err;
err = mnt_fs(target, "tracefs", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount tracefs: %s", err_str);
}
return err;
}
int open_obj_pinned(const char *path, bool quiet)
{
char *pname;
int fd = -1;
pname = strdup(path);
if (!pname) {
if (!quiet)
p_err("mem alloc failed");
goto out_ret;
}
fd = bpf_obj_get(pname);
if (fd < 0) {
if (!quiet)
p_err("bpf obj get (%s): %s", pname,
errno == EACCES && !is_bpffs(dirname(pname)) ?
"directory not in bpf file system (bpffs)" :
strerror(errno));
goto out_free;
}
out_free:
free(pname);
out_ret:
return fd;
}
int open_obj_pinned_any(const char *path, enum bpf_obj_type exp_type)
{
enum bpf_obj_type type;
int fd;
fd = open_obj_pinned(path, false);
if (fd < 0)
return -1;
type = get_fd_type(fd);
if (type < 0) {
close(fd);
return type;
}
if (type != exp_type) {
p_err("incorrect object type: %s", get_fd_type_name(type));
close(fd);
return -1;
}
return fd;
}
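
/* Illustrative usage sketch (not part of bpftool): open a pinned object and
 * require it to be a program rather than a map or link. The pin path and
 * function name below are examples only.
 */
static __maybe_unused int example_get_pinned_prog_fd(void)
{
	int fd;

	fd = open_obj_pinned_any("/sys/fs/bpf/example_prog", BPF_OBJ_PROG);
	if (fd < 0)
		return -1;	/* error message already printed */
	/* ... use the fd, e.g. with bpf_prog_get_info_by_fd() ... */
	close(fd);
	return 0;
}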
int mount_bpffs_for_pin(const char *name, bool is_dir)
{
char err_str[ERR_MAX_LEN];
char *file;
char *dir;
int err = 0;
if (is_dir && is_bpffs(name))
return err;
file = malloc(strlen(name) + 1);
if (!file) {
p_err("mem alloc failed");
return -1;
}
strcpy(file, name);
dir = dirname(file);
if (is_bpffs(dir))
/* nothing to do if already mounted */
goto out_free;
if (block_mount) {
p_err("no BPF file system found, not mounting it due to --nomount option");
err = -1;
goto out_free;
}
err = mnt_fs(dir, "bpf", err_str, ERR_MAX_LEN);
if (err) {
err_str[ERR_MAX_LEN - 1] = '\0';
p_err("can't mount BPF file system to pin the object (%s): %s",
name, err_str);
}
out_free:
free(file);
return err;
}
int do_pin_fd(int fd, const char *name)
{
int err;
err = mount_bpffs_for_pin(name, false);
if (err)
return err;
err = bpf_obj_pin(fd, name);
if (err)
p_err("can't pin the object (%s): %s", name, strerror(errno));
return err;
}
int do_pin_any(int argc, char **argv, int (*get_fd)(int *, char ***))
{
int err;
int fd;
if (!REQ_ARGS(3))
return -EINVAL;
fd = get_fd(&argc, &argv);
if (fd < 0)
return fd;
err = do_pin_fd(fd, *argv);
close(fd);
return err;
}
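
/* Illustrative usage sketch (not part of bpftool): pin an already-open fd.
 * do_pin_fd() mounts a bpffs instance on the parent directory if needed
 * (unless --nomount was given), then calls bpf_obj_pin(). The path and
 * function name are examples only.
 */
static __maybe_unused int example_pin_link_fd(int link_fd)
{
	return do_pin_fd(link_fd, "/sys/fs/bpf/example_link");
}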
const char *get_fd_type_name(enum bpf_obj_type type)
{
static const char * const names[] = {
[BPF_OBJ_UNKNOWN] = "unknown",
[BPF_OBJ_PROG] = "prog",
[BPF_OBJ_MAP] = "map",
[BPF_OBJ_LINK] = "link",
};
if (type < 0 || type >= ARRAY_SIZE(names) || !names[type])
return names[BPF_OBJ_UNKNOWN];
return names[type];
}
void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd,
char *name_buff, size_t buff_len)
{
const char *prog_name = prog_info->name;
const struct btf_type *func_type;
const struct bpf_func_info finfo = {};
struct bpf_prog_info info = {};
__u32 info_len = sizeof(info);
struct btf *prog_btf = NULL;
if (buff_len <= BPF_OBJ_NAME_LEN ||
strlen(prog_info->name) < BPF_OBJ_NAME_LEN - 1)
goto copy_name;
if (!prog_info->btf_id || prog_info->nr_func_info == 0)
goto copy_name;
info.nr_func_info = 1;
info.func_info_rec_size = prog_info->func_info_rec_size;
if (info.func_info_rec_size > sizeof(finfo))
info.func_info_rec_size = sizeof(finfo);
info.func_info = ptr_to_u64(&finfo);
if (bpf_prog_get_info_by_fd(prog_fd, &info, &info_len))
goto copy_name;
prog_btf = btf__load_from_kernel_by_id(info.btf_id);
if (!prog_btf)
goto copy_name;
func_type = btf__type_by_id(prog_btf, finfo.type_id);
if (!func_type || !btf_is_func(func_type))
goto copy_name;
prog_name = btf__name_by_offset(prog_btf, func_type->name_off);
copy_name:
snprintf(name_buff, buff_len, "%s", prog_name);
if (prog_btf)
btf__free(prog_btf);
}
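
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): with a buffer larger than BPF_OBJ_NAME_LEN,
 * get_prog_full_name() replaces a name truncated to 15 characters in
 * bpf_prog_info with the full function name from BTF, when available.
 */
static __maybe_unused void example_print_full_prog_name(int prog_fd,
							 const struct bpf_prog_info *info)
{
	char name[MAX_PROG_FULL_NAME];

	get_prog_full_name(info, prog_fd, name, sizeof(name));
	printf("%s\n", name);
}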
int get_fd_type(int fd)
{
char path[PATH_MAX];
char buf[512];
ssize_t n;
snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
n = readlink(path, buf, sizeof(buf));
if (n < 0) {
p_err("can't read link type: %s", strerror(errno));
return -1;
}
if (n == sizeof(buf)) {
p_err("can't read link type: path too long!");
return -1;
}
/* readlink() does not null-terminate the buffer */
buf[n] = '\0';
if (strstr(buf, "bpf-map"))
return BPF_OBJ_MAP;
else if (strstr(buf, "bpf-prog"))
return BPF_OBJ_PROG;
else if (strstr(buf, "bpf-link"))
return BPF_OBJ_LINK;
return BPF_OBJ_UNKNOWN;
}
char *get_fdinfo(int fd, const char *key)
{
char path[PATH_MAX];
char *line = NULL;
size_t line_n = 0;
ssize_t n;
FILE *fdi;
snprintf(path, sizeof(path), "/proc/self/fdinfo/%d", fd);
fdi = fopen(path, "r");
if (!fdi)
return NULL;
while ((n = getline(&line, &line_n, fdi)) > 0) {
char *value;
int len;
if (!strstr(line, key))
continue;
fclose(fdi);
value = strchr(line, '\t');
if (!value || !value[1]) {
free(line);
return NULL;
}
value++;
len = strlen(value);
memmove(line, value, len);
line[len - 1] = '\0';
return line;
}
free(line);
fclose(fdi);
return NULL;
}
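
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): get_fdinfo() returns a heap-allocated copy of the value for
 * the given key from /proc/self/fdinfo/<fd>, or NULL if the key is missing,
 * so the caller must free() the result.
 */
static __maybe_unused void example_show_memlock(int fd)
{
	char *memlock = get_fdinfo(fd, "memlock");

	if (memlock)
		printf("memlock %sB\n", memlock);
	free(memlock);	/* free(NULL) is a no-op */
}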
void print_data_json(uint8_t *data, size_t len)
{
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len; i++)
jsonw_printf(json_wtr, "%d", data[i]);
jsonw_end_array(json_wtr);
}
void print_hex_data_json(uint8_t *data, size_t len)
{
unsigned int i;
jsonw_start_array(json_wtr);
for (i = 0; i < len; i++)
jsonw_printf(json_wtr, "\"0x%02hhx\"", data[i]);
jsonw_end_array(json_wtr);
}
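
/* Illustrative note (assumption, not bpftool code): for a buffer {1, 2, 255},
 * print_data_json() emits [1,2,255] while print_hex_data_json() emits
 * ["0x01","0x02","0xff"]. The wrapper below is hypothetical.
 */
static __maybe_unused void example_dump_value_json(uint8_t *value, size_t len)
{
	jsonw_name(json_wtr, "value");
	print_hex_data_json(value, len);
}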
/* extra params for nftw cb */
static struct hashmap *build_fn_table;
static enum bpf_obj_type build_fn_type;
static int do_build_table_cb(const char *fpath, const struct stat *sb,
int typeflag, struct FTW *ftwbuf)
{
struct bpf_prog_info pinned_info;
__u32 len = sizeof(pinned_info);
enum bpf_obj_type objtype;
int fd, err = 0;
char *path;
if (typeflag != FTW_F)
goto out_ret;
fd = open_obj_pinned(fpath, true);
if (fd < 0)
goto out_ret;
objtype = get_fd_type(fd);
if (objtype != build_fn_type)
goto out_close;
memset(&pinned_info, 0, sizeof(pinned_info));
if (bpf_prog_get_info_by_fd(fd, &pinned_info, &len))
goto out_close;
path = strdup(fpath);
if (!path) {
err = -1;
goto out_close;
}
err = hashmap__append(build_fn_table, pinned_info.id, path);
if (err) {
p_err("failed to append entry to hashmap for ID %u, path '%s': %s",
pinned_info.id, path, strerror(errno));
free(path);
goto out_close;
}
out_close:
close(fd);
out_ret:
return err;
}
int build_pinned_obj_table(struct hashmap *tab,
enum bpf_obj_type type)
{
struct mntent *mntent = NULL;
FILE *mntfile = NULL;
int flags = FTW_PHYS;
int nopenfd = 16;
int err = 0;
mntfile = setmntent("/proc/mounts", "r");
if (!mntfile)
return -1;
build_fn_table = tab;
build_fn_type = type;
while ((mntent = getmntent(mntfile))) {
char *path = mntent->mnt_dir;
if (strncmp(mntent->mnt_type, "bpf", 3) != 0)
continue;
err = nftw(path, do_build_table_cb, nopenfd, flags);
if (err)
break;
}
fclose(mntfile);
return err;
}
void delete_pinned_obj_table(struct hashmap *map)
{
struct hashmap_entry *entry;
size_t bkt;
if (!map)
return;
hashmap__for_each_entry(map, entry, bkt)
free(entry->pvalue);
hashmap__free(map);
}
unsigned int get_page_size(void)
{
static int result;
if (!result)
result = getpagesize();
return result;
}
unsigned int get_possible_cpus(void)
{
int cpus = libbpf_num_possible_cpus();
if (cpus < 0) {
p_err("Can't get # of possible cpus: %s", strerror(-cpus));
exit(-1);
}
return cpus;
}
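
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): per-CPU map values are dumped as one slot per possible CPU,
 * so lookup buffers are sized with this helper. A real caller must also
 * account for the kernel rounding each slot up to 8 bytes, which is not shown
 * here.
 */
static __maybe_unused void *example_alloc_percpu_value(__u32 value_size)
{
	return calloc(get_possible_cpus(), value_size);
}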
static char *
ifindex_to_name_ns(__u32 ifindex, __u32 ns_dev, __u32 ns_ino, char *buf)
{
struct stat st;
int err;
err = stat("/proc/self/ns/net", &st);
if (err) {
p_err("Can't stat /proc/self: %s", strerror(errno));
return NULL;
}
if (st.st_dev != ns_dev || st.st_ino != ns_ino)
return NULL;
return if_indextoname(ifindex, buf);
}
static int read_sysfs_hex_int(char *path)
{
char vendor_id_buf[8];
int len;
int fd;
fd = open(path, O_RDONLY);
if (fd < 0) {
p_err("Can't open %s: %s", path, strerror(errno));
return -1;
}
len = read(fd, vendor_id_buf, sizeof(vendor_id_buf));
close(fd);
if (len < 0) {
p_err("Can't read %s: %s", path, strerror(errno));
return -1;
}
if (len >= (int)sizeof(vendor_id_buf)) {
p_err("Value in %s too long", path);
return -1;
}
vendor_id_buf[len] = 0;
return strtol(vendor_id_buf, NULL, 0);
}
static int read_sysfs_netdev_hex_int(char *devname, const char *entry_name)
{
char full_path[64];
snprintf(full_path, sizeof(full_path), "/sys/class/net/%s/device/%s",
devname, entry_name);
return read_sysfs_hex_int(full_path);
}
const char *
ifindex_to_arch(__u32 ifindex, __u64 ns_dev, __u64 ns_ino, const char **opt)
{
__maybe_unused int device_id;
char devname[IF_NAMESIZE];
int vendor_id;
if (!ifindex_to_name_ns(ifindex, ns_dev, ns_ino, devname)) {
p_err("Can't get net device name for ifindex %d: %s", ifindex,
strerror(errno));
return NULL;
}
vendor_id = read_sysfs_netdev_hex_int(devname, "vendor");
if (vendor_id < 0) {
p_err("Can't get device vendor id for %s", devname);
return NULL;
}
switch (vendor_id) {
#ifdef HAVE_LIBBFD_SUPPORT
case 0x19ee:
device_id = read_sysfs_netdev_hex_int(devname, "device");
if (device_id != 0x4000 &&
device_id != 0x6000 &&
device_id != 0x6003)
p_info("Unknown NFP device ID, assuming it is NFP-6xxx arch");
*opt = "ctx4";
return "NFP-6xxx";
#endif /* HAVE_LIBBFD_SUPPORT */
/* No NFP support in LLVM, we have no valid triple to return. */
default:
p_err("Can't get arch name for device vendor id 0x%04x",
vendor_id);
return NULL;
}
}
void print_dev_plain(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
char name[IF_NAMESIZE];
if (!ifindex)
return;
printf(" offloaded_to ");
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
printf("%s", name);
else
printf("ifindex %u ns_dev %llu ns_ino %llu",
ifindex, ns_dev, ns_inode);
}
void print_dev_json(__u32 ifindex, __u64 ns_dev, __u64 ns_inode)
{
char name[IF_NAMESIZE];
if (!ifindex)
return;
jsonw_name(json_wtr, "dev");
jsonw_start_object(json_wtr);
jsonw_uint_field(json_wtr, "ifindex", ifindex);
jsonw_uint_field(json_wtr, "ns_dev", ns_dev);
jsonw_uint_field(json_wtr, "ns_inode", ns_inode);
if (ifindex_to_name_ns(ifindex, ns_dev, ns_inode, name))
jsonw_string_field(json_wtr, "ifname", name);
jsonw_end_object(json_wtr);
}
int parse_u32_arg(int *argc, char ***argv, __u32 *val, const char *what)
{
char *endptr;
NEXT_ARGP();
if (*val) {
p_err("%s already specified", what);
return -1;
}
*val = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as %s", **argv, what);
return -1;
}
NEXT_ARGP();
return 0;
}
int __printf(2, 0)
print_all_levels(__maybe_unused enum libbpf_print_level level,
const char *format, va_list args)
{
return vfprintf(stderr, format, args);
}
static int prog_fd_by_nametag(void *nametag, int **fds, bool tag)
{
char prog_name[MAX_PROG_FULL_NAME];
unsigned int id = 0;
int fd, nb_fds = 0;
void *tmp;
int err;
while (true) {
struct bpf_prog_info info = {};
__u32 len = sizeof(info);
err = bpf_prog_get_next_id(id, &id);
if (err) {
if (errno != ENOENT) {
p_err("%s", strerror(errno));
goto err_close_fds;
}
return nb_fds;
}
fd = bpf_prog_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get prog by id (%u): %s",
id, strerror(errno));
goto err_close_fds;
}
err = bpf_prog_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get prog info (%u): %s",
id, strerror(errno));
goto err_close_fd;
}
if (tag && memcmp(nametag, info.tag, BPF_TAG_SIZE)) {
close(fd);
continue;
}
if (!tag) {
get_prog_full_name(&info, fd, prog_name,
sizeof(prog_name));
if (strncmp(nametag, prog_name, sizeof(prog_name))) {
close(fd);
continue;
}
}
if (nb_fds > 0) {
tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
if (!tmp) {
p_err("failed to realloc");
goto err_close_fd;
}
*fds = tmp;
}
(*fds)[nb_fds++] = fd;
}
err_close_fd:
close(fd);
err_close_fds:
while (--nb_fds >= 0)
close((*fds)[nb_fds]);
return -1;
}
int prog_parse_fds(int *argc, char ***argv, int **fds)
{
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
(*fds)[0] = bpf_prog_get_fd_by_id(id);
if ((*fds)[0] < 0) {
p_err("get by id (%u): %s", id, strerror(errno));
return -1;
}
return 1;
} else if (is_prefix(**argv, "tag")) {
unsigned char tag[BPF_TAG_SIZE];
NEXT_ARGP();
if (sscanf(**argv, BPF_TAG_FMT, tag, tag + 1, tag + 2,
tag + 3, tag + 4, tag + 5, tag + 6, tag + 7)
!= BPF_TAG_SIZE) {
p_err("can't parse tag");
return -1;
}
NEXT_ARGP();
return prog_fd_by_nametag(tag, fds, true);
} else if (is_prefix(**argv, "name")) {
char *name;
NEXT_ARGP();
name = **argv;
if (strlen(name) > MAX_PROG_FULL_NAME - 1) {
p_err("can't parse name");
return -1;
}
NEXT_ARGP();
return prog_fd_by_nametag(name, fds, false);
} else if (is_prefix(**argv, "pinned")) {
char *path;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_PROG);
if ((*fds)[0] < 0)
return -1;
return 1;
}
p_err("expected 'id', 'tag', 'name' or 'pinned', got: '%s'?", **argv);
return -1;
}
int prog_parse_fd(int *argc, char ***argv)
{
int *fds = NULL;
int nb_fds, fd;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = prog_parse_fds(argc, argv, &fds);
if (nb_fds != 1) {
if (nb_fds > 1) {
p_err("several programs match this handle");
while (nb_fds--)
close(fds[nb_fds]);
}
fd = -1;
goto exit_free;
}
fd = fds[0];
exit_free:
free(fds);
return fd;
}
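
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): a subcommand typically resolves its "id|tag|name|pinned
 * <ref>" arguments to a single program fd with prog_parse_fd(), which errors
 * out if the reference matches several programs.
 */
static __maybe_unused int example_do_something_with_prog(int *argc, char ***argv)
{
	int fd;

	fd = prog_parse_fd(argc, argv);
	if (fd < 0)
		return -1;
	/* ... inspect or dump the program ... */
	close(fd);
	return 0;
}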
static int map_fd_by_name(char *name, int **fds)
{
unsigned int id = 0;
int fd, nb_fds = 0;
void *tmp;
int err;
while (true) {
struct bpf_map_info info = {};
__u32 len = sizeof(info);
err = bpf_map_get_next_id(id, &id);
if (err) {
if (errno != ENOENT) {
p_err("%s", strerror(errno));
goto err_close_fds;
}
return nb_fds;
}
fd = bpf_map_get_fd_by_id(id);
if (fd < 0) {
p_err("can't get map by id (%u): %s",
id, strerror(errno));
goto err_close_fds;
}
err = bpf_map_get_info_by_fd(fd, &info, &len);
if (err) {
p_err("can't get map info (%u): %s",
id, strerror(errno));
goto err_close_fd;
}
if (strncmp(name, info.name, BPF_OBJ_NAME_LEN)) {
close(fd);
continue;
}
if (nb_fds > 0) {
tmp = realloc(*fds, (nb_fds + 1) * sizeof(int));
if (!tmp) {
p_err("failed to realloc");
goto err_close_fd;
}
*fds = tmp;
}
(*fds)[nb_fds++] = fd;
}
err_close_fd:
close(fd);
err_close_fds:
while (--nb_fds >= 0)
close((*fds)[nb_fds]);
return -1;
}
int map_parse_fds(int *argc, char ***argv, int **fds)
{
if (is_prefix(**argv, "id")) {
unsigned int id;
char *endptr;
NEXT_ARGP();
id = strtoul(**argv, &endptr, 0);
if (*endptr) {
p_err("can't parse %s as ID", **argv);
return -1;
}
NEXT_ARGP();
(*fds)[0] = bpf_map_get_fd_by_id(id);
if ((*fds)[0] < 0) {
p_err("get map by id (%u): %s", id, strerror(errno));
return -1;
}
return 1;
} else if (is_prefix(**argv, "name")) {
char *name;
NEXT_ARGP();
name = **argv;
if (strlen(name) > BPF_OBJ_NAME_LEN - 1) {
p_err("can't parse name");
return -1;
}
NEXT_ARGP();
return map_fd_by_name(name, fds);
} else if (is_prefix(**argv, "pinned")) {
char *path;
NEXT_ARGP();
path = **argv;
NEXT_ARGP();
(*fds)[0] = open_obj_pinned_any(path, BPF_OBJ_MAP);
if ((*fds)[0] < 0)
return -1;
return 1;
}
p_err("expected 'id', 'name' or 'pinned', got: '%s'?", **argv);
return -1;
}
int map_parse_fd(int *argc, char ***argv)
{
int *fds = NULL;
int nb_fds, fd;
fds = malloc(sizeof(int));
if (!fds) {
p_err("mem alloc failed");
return -1;
}
nb_fds = map_parse_fds(argc, argv, &fds);
if (nb_fds != 1) {
if (nb_fds > 1) {
p_err("several maps match this handle");
while (nb_fds--)
close(fds[nb_fds]);
}
fd = -1;
goto exit_free;
}
fd = fds[0];
exit_free:
free(fds);
return fd;
}
int map_parse_fd_and_info(int *argc, char ***argv, struct bpf_map_info *info,
__u32 *info_len)
{
int err;
int fd;
fd = map_parse_fd(argc, argv);
if (fd < 0)
return -1;
err = bpf_map_get_info_by_fd(fd, info, info_len);
if (err) {
p_err("can't get map info: %s", strerror(errno));
close(fd);
return err;
}
return fd;
}
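
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): resolve a map reference to an fd plus its bpf_map_info in
 * one call, as the map subcommands do.
 */
static __maybe_unused int example_show_map_geometry(int *argc, char ***argv)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int fd;

	fd = map_parse_fd_and_info(argc, argv, &info, &len);
	if (fd < 0)
		return -1;
	printf("key %uB value %uB max_entries %u\n",
	       info.key_size, info.value_size, info.max_entries);
	close(fd);
	return 0;
}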
size_t hash_fn_for_key_as_id(long key, void *ctx)
{
return key;
}
bool equal_fn_for_key_as_id(long k1, long k2, void *ctx)
{
return k1 == k2;
}
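
/* Illustrative usage sketch (not part of bpftool, function name is
 * hypothetical): the two callbacks above let a hashmap keyed by BPF object ID
 * map IDs to pinned paths. Error handling is elided; note that hashmap__new()
 * reports failure through an ERR_PTR(), not NULL.
 */
static __maybe_unused void example_list_pinned_progs(void)
{
	struct hashmap_entry *entry;
	struct hashmap *tab;
	size_t bkt;

	tab = hashmap__new(hash_fn_for_key_as_id, equal_fn_for_key_as_id, NULL);
	build_pinned_obj_table(tab, BPF_OBJ_PROG);
	hashmap__for_each_entry(tab, entry, bkt)
		printf("id %lu pinned at %s\n",
		       (unsigned long)entry->key, (char *)entry->pvalue);
	delete_pinned_obj_table(tab);
}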
const char *bpf_attach_type_input_str(enum bpf_attach_type t)
{
switch (t) {
case BPF_CGROUP_INET_INGRESS: return "ingress";
case BPF_CGROUP_INET_EGRESS: return "egress";
case BPF_CGROUP_INET_SOCK_CREATE: return "sock_create";
case BPF_CGROUP_INET_SOCK_RELEASE: return "sock_release";
case BPF_CGROUP_SOCK_OPS: return "sock_ops";
case BPF_CGROUP_DEVICE: return "device";
case BPF_CGROUP_INET4_BIND: return "bind4";
case BPF_CGROUP_INET6_BIND: return "bind6";
case BPF_CGROUP_INET4_CONNECT: return "connect4";
case BPF_CGROUP_INET6_CONNECT: return "connect6";
case BPF_CGROUP_INET4_POST_BIND: return "post_bind4";
case BPF_CGROUP_INET6_POST_BIND: return "post_bind6";
case BPF_CGROUP_INET4_GETPEERNAME: return "getpeername4";
case BPF_CGROUP_INET6_GETPEERNAME: return "getpeername6";
case BPF_CGROUP_INET4_GETSOCKNAME: return "getsockname4";
case BPF_CGROUP_INET6_GETSOCKNAME: return "getsockname6";
case BPF_CGROUP_UDP4_SENDMSG: return "sendmsg4";
case BPF_CGROUP_UDP6_SENDMSG: return "sendmsg6";
case BPF_CGROUP_SYSCTL: return "sysctl";
case BPF_CGROUP_UDP4_RECVMSG: return "recvmsg4";
case BPF_CGROUP_UDP6_RECVMSG: return "recvmsg6";
case BPF_CGROUP_GETSOCKOPT: return "getsockopt";
case BPF_CGROUP_SETSOCKOPT: return "setsockopt";
case BPF_TRACE_RAW_TP: return "raw_tp";
case BPF_TRACE_FENTRY: return "fentry";
case BPF_TRACE_FEXIT: return "fexit";
case BPF_MODIFY_RETURN: return "mod_ret";
case BPF_SK_REUSEPORT_SELECT: return "sk_skb_reuseport_select";
case BPF_SK_REUSEPORT_SELECT_OR_MIGRATE: return "sk_skb_reuseport_select_or_migrate";
default: return libbpf_bpf_attach_type_str(t);
}
}
int pathname_concat(char *buf, int buf_sz, const char *path,
const char *name)
{
int len;
len = snprintf(buf, buf_sz, "%s/%s", path, name);
if (len < 0)
return -EINVAL;
if (len >= buf_sz)
return -ENAMETOOLONG;
return 0;
}
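
/* Illustrative usage sketch (not part of bpftool): build a pin path under a
 * directory; on overflow the helper returns -ENAMETOOLONG instead of silently
 * truncating. The names below are examples only.
 */
static __maybe_unused int example_build_pin_path(char *buf, int buf_sz)
{
	return pathname_concat(buf, buf_sz, "/sys/fs/bpf", "example_prog");
}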