commit 516f4d3397
Certain programs may wish to be able to query cpumasks. For example, if a program that is tracing percpu operations wishes to track which tasks end up running on which CPUs, it could be useful to associate that with the tasks' cpumasks. Similarly, programs tracking NUMA allocations, CPU scheduling domains, etc., could potentially benefit from being able to see which CPUs a task could be migrated to.

This patch enables these types of use cases by introducing a series of bpf_cpumask_* kfuncs. Amongst these kfuncs, there are two separate "classes" of operations:

1. kfuncs which allow the caller to allocate and mutate their own cpumask kptrs in the form of a struct bpf_cpumask * object. Such kfuncs include e.g. bpf_cpumask_create() to allocate the cpumask, and bpf_cpumask_or() to mutate it. "Regular" cpumasks such as p->cpus_ptr may not be passed to these kfuncs, and the verifier will ensure this is the case by comparing BTF IDs.

2. Read-only operations which operate on const struct cpumask * arguments. For example, bpf_cpumask_test_cpu(), which tests whether a CPU is set in the cpumask. Any trusted struct cpumask * or struct bpf_cpumask * may be passed to these kfuncs. The verifier allows struct bpf_cpumask * even though the kfunc is defined with struct cpumask * because the first element of a struct bpf_cpumask is a cpumask_t, so it is safe to cast.

A follow-on patch will add selftests which validate these kfuncs, and another will document them.

Signed-off-by: David Vernet <void@manifault.com>
Link: https://lore.kernel.org/r/20230125143816.721952-3-void@manifault.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
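For illustration only (not part of this commit), here is a minimal sketch of how a tracing program might exercise both classes of kfuncs. It assumes selftest-style __ksym declarations, the tp_btf/task_newtask attach point, and the bpf_cpumask_set_cpu()/bpf_cpumask_release() kfuncs from the wider series; only bpf_cpumask_create() and bpf_cpumask_test_cpu() are named in the message above, so the exact prototypes should be checked against the in-tree definitions.

/* Illustrative sketch only: allocate a bpf_cpumask kptr, mutate it, and query
 * it with a read-only kfunc. bpf_cpumask_set_cpu() and bpf_cpumask_release()
 * are assumed from the rest of this series.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct bpf_cpumask *bpf_cpumask_create(void) __ksym;
void bpf_cpumask_release(struct bpf_cpumask *cpumask) __ksym;
void bpf_cpumask_set_cpu(u32 cpu, struct bpf_cpumask *cpumask) __ksym;
bool bpf_cpumask_test_cpu(u32 cpu, const struct cpumask *cpumask) __ksym;

SEC("tp_btf/task_newtask")
int BPF_PROG(cpumask_sketch, struct task_struct *task, u64 clone_flags)
{
	struct bpf_cpumask *mask;

	/* Class 1: allocate and mutate our own cpumask kptr. */
	mask = bpf_cpumask_create();
	if (!mask)
		return 0;
	bpf_cpumask_set_cpu(0, mask);

	/* Class 2: read-only kfuncs take const struct cpumask *. A
	 * struct bpf_cpumask * may be passed because its first element
	 * is a cpumask_t, so the cast is safe.
	 */
	if (bpf_cpumask_test_cpu(0, (const struct cpumask *)mask))
		bpf_printk("CPU 0 is set in the allocated cpumask");

	/* Allocated cpumasks must be released when no longer needed. */
	bpf_cpumask_release(mask);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Because bpf_cpumask_create() returns a referenced kptr, the verifier should reject the program if the release call is omitted.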
Makefile · 47 lines · 1.8 KiB
# SPDX-License-Identifier: GPL-2.0
obj-y := core.o
ifneq ($(CONFIG_BPF_JIT_ALWAYS_ON),y)
# ___bpf_prog_run() needs GCSE disabled on x86; see 3193c0836f203 for details
cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += $(call cc-disable-warning, override-init) $(cflags-nogcse-yy)

obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
obj-$(CONFIG_BPF_SYSCALL) += btf.o memalloc.o
obj-$(CONFIG_BPF_JIT) += dispatcher.o
ifeq ($(CONFIG_NET),y)
obj-$(CONFIG_BPF_SYSCALL) += devmap.o
obj-$(CONFIG_BPF_SYSCALL) += cpumap.o
obj-$(CONFIG_BPF_SYSCALL) += offload.o
obj-$(CONFIG_BPF_SYSCALL) += net_namespace.o
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
ifeq ($(CONFIG_CGROUPS),y)
obj-$(CONFIG_BPF_SYSCALL) += cgroup_iter.o bpf_cgrp_storage.o
endif
obj-$(CONFIG_CGROUP_BPF) += cgroup.o
ifeq ($(CONFIG_INET),y)
obj-$(CONFIG_BPF_SYSCALL) += reuseport_array.o
endif
ifeq ($(CONFIG_SYSFS),y)
obj-$(CONFIG_DEBUG_INFO_BTF) += sysfs_btf.o
endif
ifeq ($(CONFIG_BPF_JIT),y)
obj-$(CONFIG_BPF_SYSCALL) += bpf_struct_ops.o
obj-$(CONFIG_BPF_SYSCALL) += cpumask.o
obj-${CONFIG_BPF_LSM} += bpf_lsm.o
endif
obj-$(CONFIG_BPF_PRELOAD) += preload/

obj-$(CONFIG_BPF_SYSCALL) += relo_core.o
$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE
	$(call if_changed_rule,cc_o_c)