perf/uprobe: split uprobe_unregister()

With uprobe_unregister() having grown a synchronize_srcu(), it becomes
fairly slow to call. Esp. since both users of this API call it in a
loop.

Peel off the sync_srcu() and do it once, after the loop.

We also need to add uprobe_unregister_sync() into uprobe_register()'s
error handling path, as we need to be careful about returning to the
caller before we have a guarantee that partially attached consumer won't
be called anymore. This is an unlikely slow path and this should be
totally fine to be slow in the case of a failed attach.

Co-developed-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lore.kernel.org/r/20240903174603.3554182-6-andrii@kernel.org
This commit is contained in:
Peter Zijlstra 2024-09-03 10:46:00 -07:00
parent cc01bd044e
commit 04b01625da
5 changed files with 32 additions and 11 deletions

View File

@@ -115,7 +115,8 @@ extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc);
extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc);
extern void uprobe_unregister_sync(void);
extern int uprobe_mmap(struct vm_area_struct *vma);
extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void uprobe_start_dup_mmap(void);
@@ -164,7 +165,10 @@ uprobe_apply(struct uprobe* uprobe, struct uprobe_consumer *uc, bool add)
return -ENOSYS;
}
static inline void
uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
}
static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)

View File

@@ -1105,11 +1105,11 @@ register_for_each_vma(struct uprobe *uprobe, struct uprobe_consumer *new)
}
/**
* uprobe_unregister - unregister an already registered probe.
* uprobe_unregister_nosync - unregister an already registered probe.
* @uprobe: uprobe to remove
* @uc: identify which probe if multiple probes are colocated.
*/
void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
int err;
@@ -1121,12 +1121,15 @@ void uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
/* TODO : cant unregister? schedule a worker thread */
if (unlikely(err)) {
uprobe_warn(current, "unregister, leaking uprobe");
goto out_sync;
return;
}
put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister_nosync);
out_sync:
void uprobe_unregister_sync(void)
{
/*
* Now that handler_chain() and handle_uretprobe_chain() iterate over
* uprobe->consumers list under RCU protection without holding
@@ -1138,7 +1141,7 @@ out_sync:
*/
synchronize_srcu(&uprobes_srcu);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);
EXPORT_SYMBOL_GPL(uprobe_unregister_sync);
/**
* uprobe_register - register a probe
@@ -1196,7 +1199,13 @@ struct uprobe *uprobe_register(struct inode *inode,
up_write(&uprobe->register_rwsem);
if (ret) {
uprobe_unregister(uprobe, uc);
uprobe_unregister_nosync(uprobe, uc);
/*
* Registration might have partially succeeded, so we can have
* this consumer being called right at this time. We need to
* sync here. It's ok, it's unlikely slow path.
*/
uprobe_unregister_sync();
return ERR_PTR(ret);
}

View File

@@ -3184,7 +3184,10 @@ static void bpf_uprobe_unregister(struct bpf_uprobe *uprobes, u32 cnt)
u32 i;
for (i = 0; i < cnt; i++)
uprobe_unregister(uprobes[i].uprobe, &uprobes[i].consumer);
uprobe_unregister_nosync(uprobes[i].uprobe, &uprobes[i].consumer);
if (cnt)
uprobe_unregister_sync();
}
static void bpf_uprobe_multi_link_release(struct bpf_link *link)

View File

@@ -1097,6 +1097,7 @@ static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
static void __probe_event_disable(struct trace_probe *tp)
{
struct trace_uprobe *tu;
bool sync = false;
tu = container_of(tp, struct trace_uprobe, tp);
WARN_ON(!uprobe_filter_is_empty(tu->tp.event->filter));
@@ -1105,9 +1106,12 @@ static void __probe_event_disable(struct trace_probe *tp)
if (!tu->uprobe)
continue;
uprobe_unregister(tu->uprobe, &tu->consumer);
uprobe_unregister_nosync(tu->uprobe, &tu->consumer);
sync = true;
tu->uprobe = NULL;
}
if (sync)
uprobe_unregister_sync();
}
static int probe_event_enable(struct trace_event_call *call,

View File

@@ -475,7 +475,8 @@ static void testmod_unregister_uprobe(void)
mutex_lock(&testmod_uprobe_mutex);
if (uprobe.uprobe) {
uprobe_unregister(uprobe.uprobe, &uprobe.consumer);
uprobe_unregister_nosync(uprobe.uprobe, &uprobe.consumer);
uprobe_unregister_sync();
path_put(&uprobe.path);
uprobe.uprobe = NULL;
}