bpf: Add support for bpf_get_func_ip helper for uprobe program
Adding support for the bpf_get_func_ip helper for uprobe programs to return the probed address for both uprobe and return uprobe. We discussed this in [1] and agreed that uprobe can have a special use of the bpf_get_func_ip helper that differs from kprobe.

The kprobe bpf_get_func_ip returns:
 - the address of the function if the probe is attached on function entry, for both kprobe and return kprobe
 - 0 if the probe is not attached on function entry

The uprobe bpf_get_func_ip returns:
 - the address of the probe for both uprobe and return uprobe

The reason for this semantic difference is that the kernel can't really tell whether a probed user space address is a function entry.

The uprobe program is actually a kprobe-type program attached as a uprobe. One consequence of this design is that uprobes do not have their own set of helpers, but share them with kprobes.

As we need different functionality from the bpf_get_func_ip helper for uprobes, add a bool to bpf_trace_run_ctx so the helper can detect that it is executed in uprobe context and run the uprobe-specific code. The is_uprobe bool is set to true in bpf_prog_run_array_sleepable, which is currently used only for executing bpf programs in uprobes.

Rename bpf_prog_run_array_sleepable to bpf_prog_run_array_uprobe to reflect that it is only used for uprobes and that it sets run_ctx.is_uprobe, as suggested by Yafang Shao.

Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Tested-by: Alan Maguire <alan.maguire@oracle.com>
[1] https://lore.kernel.org/bpf/CAEf4BzZ=xLVkG5eurEuvLU79wAMtwho7ReR+XJAgwhFF4M-7Cg@mail.gmail.com/
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Tested-by: Viktor Malik <vmalik@redhat.com>
Acked-by: Yonghong Song <yonghong.song@linux.dev>
Link: https://lore.kernel.org/r/20230807085956.2344866-2-jolsa@kernel.org
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
This commit is contained in:
parent a5c0a42bd3
commit a3c485a5d8
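As an illustration of the new semantics, here is a minimal sketch of a uprobe/uretprobe BPF program that reads the probed address with bpf_get_func_ip. It is not part of this commit; the target binary "/usr/bin/bash", the function "readline" and the program names are hypothetical examples, and the sketch assumes libbpf with a generated vmlinux.h. Since a uprobe program is a kprobe-type program, the standard BPF_KPROBE/BPF_KRETPROBE wrappers are used.

/* Hypothetical example, not part of this commit: uprobe and uretprobe
 * programs reading the probed user space address via bpf_get_func_ip().
 * The target binary and function below are made-up examples.
 */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char LICENSE[] SEC("license") = "GPL";

SEC("uprobe//usr/bin/bash:readline")
int BPF_KPROBE(readline_entry)
{
	/* With this patch the helper returns the probed address. */
	__u64 ip = bpf_get_func_ip(ctx);

	bpf_printk("uprobe hit at 0x%llx", ip);
	return 0;
}

SEC("uretprobe//usr/bin/bash:readline")
int BPF_KRETPROBE(readline_exit)
{
	/* The return uprobe reports the same probe (entry) address. */
	__u64 ip = bpf_get_func_ip(ctx);

	bpf_printk("uretprobe, probe address 0x%llx", ip);
	return 0;
}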
include/linux/bpf.h

@@ -1819,6 +1819,7 @@ struct bpf_cg_run_ctx {
 struct bpf_trace_run_ctx {
 	struct bpf_run_ctx run_ctx;
 	u64 bpf_cookie;
+	bool is_uprobe;
 };
 
 struct bpf_tramp_run_ctx {
@@ -1867,6 +1868,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
 	if (unlikely(!array))
 		return ret;
 
+	run_ctx.is_uprobe = false;
+
 	migrate_disable();
 	old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 	item = &array->items[0];
@@ -1891,8 +1894,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
  * rcu-protected dynamically sized maps.
  */
 static __always_inline u32
-bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
-			     const void *ctx, bpf_prog_run_fn run_prog)
+bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+			  const void *ctx, bpf_prog_run_fn run_prog)
 {
 	const struct bpf_prog_array_item *item;
 	const struct bpf_prog *prog;
@@ -1906,6 +1909,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
 	rcu_read_lock_trace();
 	migrate_disable();
 
+	run_ctx.is_uprobe = true;
+
 	array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
 	if (unlikely(!array))
 		goto out;
include/uapi/linux/bpf.h

@@ -5086,9 +5086,14 @@ union bpf_attr {
  * u64 bpf_get_func_ip(void *ctx)
  *	Description
  *		Get address of the traced function (for tracing and kprobe programs).
+ *
+ *		When called for kprobe program attached as uprobe it returns
+ *		probe address for both entry and return uprobe.
+ *
  *	Return
- *		Address of the traced function.
+ *		Address of the traced function for kprobe.
+ *		0 for kprobes placed within the function (not at the entry).
+ *		Address of the probe for uprobe and return uprobe.
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
kernel/trace/bpf_trace.c

@@ -1055,7 +1055,16 @@ static unsigned long get_entry_ip(unsigned long fentry_ip)
 
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
-	struct kprobe *kp = kprobe_running();
+	struct bpf_trace_run_ctx *run_ctx __maybe_unused;
+	struct kprobe *kp;
+
+#ifdef CONFIG_UPROBES
+	run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
+	if (run_ctx->is_uprobe)
+		return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
+#endif
+
+	kp = kprobe_running();
 
 	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
 		return 0;
kernel/trace/trace_probe.h

@@ -519,3 +519,8 @@ void __trace_probe_log_err(int offset, int err);
 
 #define trace_probe_log_err(offs, err)	\
 	__trace_probe_log_err(offs, TP_ERR_##err)
+
+struct uprobe_dispatch_data {
+	struct trace_uprobe	*tu;
+	unsigned long		bp_addr;
+};
kernel/trace/trace_uprobe.c

@@ -88,11 +88,6 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
 static int register_uprobe_event(struct trace_uprobe *tu);
 static int unregister_uprobe_event(struct trace_uprobe *tu);
 
-struct uprobe_dispatch_data {
-	struct trace_uprobe	*tu;
-	unsigned long		bp_addr;
-};
-
 static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
 static int uretprobe_dispatcher(struct uprobe_consumer *con,
 				unsigned long func, struct pt_regs *regs);
@@ -1352,7 +1347,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	if (bpf_prog_array_valid(call)) {
 		u32 ret;
 
-		ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
+		ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
 		if (!ret)
 			return;
 	}
tools/include/uapi/linux/bpf.h

@@ -5086,9 +5086,14 @@ union bpf_attr {
  * u64 bpf_get_func_ip(void *ctx)
  *	Description
  *		Get address of the traced function (for tracing and kprobe programs).
+ *
+ *		When called for kprobe program attached as uprobe it returns
+ *		probe address for both entry and return uprobe.
+ *
  *	Return
- *		Address of the traced function.
+ *		Address of the traced function for kprobe.
+ *		0 for kprobes placed within the function (not at the entry).
+ *		Address of the probe for uprobe and return uprobe.
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
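For completeness, a hedged user space loader sketch showing how such a program is attached; libbpf auto-attaches the u(ret)probes from their SEC() strings via perf events, which is the path that ends up in __uprobe_perf_func() and the renamed bpf_prog_run_array_uprobe() above. The object file name "func_ip.bpf.o" is a made-up example, not part of this commit.

/* Hypothetical loader sketch, not part of this commit: loads the object
 * built from the program shown earlier and lets libbpf auto-attach the
 * uprobe/uretprobe programs from their SEC() strings.
 */
#include <stdio.h>
#include <unistd.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_program *prog;

	obj = bpf_object__open_file("func_ip.bpf.o", NULL);
	if (!obj)
		return 1;

	if (bpf_object__load(obj))
		goto err;

	bpf_object__for_each_program(prog, obj) {
		/* Attaches each kprobe-type program as a u(ret)probe. */
		if (!bpf_program__attach(prog))
			goto err;
	}

	printf("attached, read /sys/kernel/debug/tracing/trace_pipe\n");
	pause();

	bpf_object__close(obj);
	return 0;
err:
	bpf_object__close(obj);
	return 1;
}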