Merge tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Tracing fixes for 6.1-rc3:

 - Fixed NULL pointer dereference in the ring buffer wait-waiters code for
   machines that have fewer CPUs than nr_cpu_ids reports. The buffer array
   is of size nr_cpu_ids, but only the online CPUs get initialized.

 - Fixed use-after-free call in ftrace_shutdown.

 - Fixed accounting of whether a kprobe is enabled.

 - Fixed NULL pointer dereference on the error path of fprobe
   rethook_alloc().

 - Fixed unregistering of fprobe_kprobe_handler.

 - Fixed memory leak in the kprobe test module.

* tag 'trace-v6.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  tracing: kprobe: Fix memory leak in test_gen_kprobe/kretprobe_cmd()
  tracing/fprobe: Fix to check whether fprobe is registered correctly
  fprobe: Check rethook_alloc() return in rethook initialization
  kprobe: reverse kp->flags when arm_kprobe failed
  ftrace: Fix use-after-free for dynamic ftrace_ops
  ring-buffer: Check for NULL cpu_buffer in ring_buffer_wake_waiters()
commit 8391aa4b4c
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
@@ -2429,8 +2429,11 @@ int enable_kprobe(struct kprobe *kp)
 	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
 		p->flags &= ~KPROBE_FLAG_DISABLED;
 		ret = arm_kprobe(p);
-		if (ret)
+		if (ret) {
 			p->flags |= KPROBE_FLAG_DISABLED;
+			if (p != kp)
+				kp->flags |= KPROBE_FLAG_DISABLED;
+		}
 	}
 out:
 	mutex_unlock(&kprobe_mutex);
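The p != kp distinction matters because multiple kprobes on the same address share one aggregate probe: enable_kprobe() arms the aggregate p, but the caller holds kp, and before this fix a failed arm_kprobe() left kp still flagged as enabled. Below is a minimal userspace sketch of that accounting pattern; the struct, flag, and function names are invented stand-ins, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define FLAG_DISABLED 0x1

struct probe {
	unsigned int flags;
	struct probe *aggregate;	/* shared probe, or NULL if standalone */
};

static bool arm_fails;	/* simulate arm_kprobe() returning an error */

static int arm_probe(struct probe *p)
{
	(void)p;
	return arm_fails ? -1 : 0;
}

static int enable_probe(struct probe *kp)
{
	struct probe *p = kp->aggregate ? kp->aggregate : kp;
	int ret;

	p->flags &= ~FLAG_DISABLED;
	ret = arm_probe(p);
	if (ret) {
		p->flags |= FLAG_DISABLED;
		if (p != kp)	/* the fix: propagate back to the handle */
			kp->flags |= FLAG_DISABLED;
	}
	return ret;
}

int main(void)
{
	struct probe agg = { .flags = FLAG_DISABLED, .aggregate = NULL };
	struct probe kp = { .flags = FLAG_DISABLED, .aggregate = &agg };

	arm_fails = true;
	enable_probe(&kp);
	printf("kp disabled after failed arm: %s\n",
	       (kp.flags & FLAG_DISABLED) ? "yes (consistent)" : "no (stale)");
	return 0;
}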
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
@@ -141,6 +141,8 @@ static int fprobe_init_rethook(struct fprobe *fp, int num)
 		return -E2BIG;
 
 	fp->rethook = rethook_alloc((void *)fp, fprobe_exit_handler);
+	if (!fp->rethook)
+		return -ENOMEM;
 	for (i = 0; i < size; i++) {
 		struct fprobe_rethook_node *node;
 
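rethook_alloc() can fail under memory pressure, and the old code went straight on to populate nodes through the NULL pointer. A small userspace sketch of the added guard, with hypothetical *_sim names standing in for the kernel API:

#include <errno.h>
#include <stdlib.h>

struct rethook { int nr_nodes; };
struct fprobe_sim { struct rethook *rethook; };

/* may return NULL, like rethook_alloc() under memory pressure */
static struct rethook *rethook_alloc_sim(void)
{
	return malloc(sizeof(struct rethook));
}

static int fprobe_init_rethook_sim(struct fprobe_sim *fp, int num)
{
	if (num <= 0)
		return -EINVAL;

	fp->rethook = rethook_alloc_sim();
	if (!fp->rethook)	/* the check the fix adds */
		return -ENOMEM;

	fp->rethook->nr_nodes = num;	/* safe only after the NULL check */
	return 0;
}

int main(void)
{
	struct fprobe_sim fp = { 0 };
	int ret = fprobe_init_rethook_sim(&fp, 4);

	free(fp.rethook);
	return ret ? 1 : 0;
}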
diff --git a/kernel/trace/fprobe.c b/kernel/trace/fprobe.c
@@ -301,7 +303,8 @@ int unregister_fprobe(struct fprobe *fp)
 {
 	int ret;
 
-	if (!fp || fp->ops.func != fprobe_handler)
+	if (!fp || (fp->ops.saved_func != fprobe_handler &&
+		    fp->ops.saved_func != fprobe_kprobe_handler))
 		return -EINVAL;
 
 	/*
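Registering a kprobe on a function an fprobe already traces rewrites the live ops.func, so it no longer identifies the owner; the stable identity is ops.saved_func, and it can legitimately be either handler variant. A hedged userspace sketch of that identity check follows; the scaffolding types and main() are invented, only the two handler names come from the diff.

#include <errno.h>
#include <stdio.h>

static void fprobe_handler(void) { }
static void fprobe_kprobe_handler(void) { }
static void some_other_func(void) { }

struct ftrace_ops_sim {
	void (*func)(void);		/* may be rewritten at runtime */
	void (*saved_func)(void);	/* stable identity of the owner */
};

struct fprobe_sim { struct ftrace_ops_sim ops; };

static int unregister_fprobe_sim(struct fprobe_sim *fp)
{
	/* validate the stable pointer, accepting both handler variants */
	if (!fp || (fp->ops.saved_func != fprobe_handler &&
		    fp->ops.saved_func != fprobe_kprobe_handler))
		return -EINVAL;
	return 0;	/* proceed with teardown */
}

int main(void)
{
	struct fprobe_sim fp = {
		.ops = { .func = some_other_func,	/* swapped by sharing */
			 .saved_func = fprobe_kprobe_handler },
	};

	printf("unregister: %d\n", unregister_fprobe_sim(&fp));	/* 0 */
	return 0;
}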
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
@@ -3028,18 +3028,8 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		command |= FTRACE_UPDATE_TRACE_FUNC;
 	}
 
-	if (!command || !ftrace_enabled) {
-		/*
-		 * If these are dynamic or per_cpu ops, they still
-		 * need their data freed. Since, function tracing is
-		 * not currently active, we can just free them
-		 * without synchronizing all CPUs.
-		 */
-		if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
-			goto free_ops;
-
-		return 0;
-	}
+	if (!command || !ftrace_enabled)
+		goto out;
 
 	/*
 	 * If the ops uses a trampoline, then it needs to be
@@ -3076,6 +3066,7 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 	removed_ops = NULL;
 	ops->flags &= ~FTRACE_OPS_FL_REMOVING;
 
+ out:
 	/*
 	 * Dynamic ops may be freed, we must make sure that all
 	 * callers are done before leaving this function.
@@ -3103,7 +3094,6 @@ int ftrace_shutdown(struct ftrace_ops *ops, int command)
 		if (IS_ENABLED(CONFIG_PREEMPTION))
 			synchronize_rcu_tasks();
 
- free_ops:
 		ftrace_trampoline_free(ops);
 	}
 
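The use-after-free came from the early-return path: for dynamic ops it freed the trampoline without first waiting for CPUs still executing through the ops, which the common exit path does via synchronize_rcu_tasks(). The sketch below models only the control-flow change, in plain userspace C with invented names; synchronize_callers() merely stands in for the RCU synchronization, it is not the kernel mechanism.

#include <stdio.h>
#include <stdlib.h>

#define OPS_DYNAMIC 0x1

struct ops {
	unsigned int flags;
	void *data;
};

static void synchronize_callers(void)
{
	/* stand-in for synchronize_rcu_tasks(): wait until no CPU can
	 * still be executing through this ops */
	puts("waiting for in-flight callers...");
}

static int shutdown(struct ops *ops, int command)
{
	if (!command)
		goto out;	/* the fix: no early "return 0" that skips
				 * synchronization before the free */

	puts("updating trace function");
out:
	if (ops->flags & OPS_DYNAMIC) {
		synchronize_callers();	/* always precedes the free now */
		free(ops->data);
		ops->data = NULL;
	}
	return 0;
}

int main(void)
{
	struct ops ops = { .flags = OPS_DYNAMIC, .data = malloc(32) };

	shutdown(&ops, 0);	/* "nothing to do" path still synchronizes */
	return 0;
}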
diff --git a/kernel/trace/kprobe_event_gen_test.c b/kernel/trace/kprobe_event_gen_test.c
@@ -100,20 +100,20 @@ static int __init test_gen_kprobe_cmd(void)
 					 KPROBE_GEN_TEST_FUNC,
 					 KPROBE_GEN_TEST_ARG0, KPROBE_GEN_TEST_ARG1);
 	if (ret)
-		goto free;
+		goto out;
 
 	/* Use kprobe_event_add_fields to add the rest of the fields */
 
 	ret = kprobe_event_add_fields(&cmd, KPROBE_GEN_TEST_ARG2, KPROBE_GEN_TEST_ARG3);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * This actually creates the event.
 	 */
 	ret = kprobe_event_gen_cmd_end(&cmd);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * Now get the gen_kprobe_test event file. We need to prevent
@@ -136,13 +136,11 @@ static int __init test_gen_kprobe_cmd(void)
 		goto delete;
 	}
 
 out:
+	kfree(buf);
 	return ret;
 delete:
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kprobe_test");
-free:
-	kfree(buf);
-
 	goto out;
 }
@@ -170,14 +168,14 @@ static int __init test_gen_kretprobe_cmd(void)
 					 KPROBE_GEN_TEST_FUNC,
 					 "$retval");
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * This actually creates the event.
 	 */
 	ret = kretprobe_event_gen_cmd_end(&cmd);
 	if (ret)
-		goto free;
+		goto out;
 
 	/*
 	 * Now get the gen_kretprobe_test event file. We need to
@@ -201,13 +199,11 @@ static int __init test_gen_kretprobe_cmd(void)
 		goto delete;
 	}
 
 out:
+	kfree(buf);
 	return ret;
 delete:
 	/* We got an error after creating the event, delete it */
 	ret = kprobe_event_delete("gen_kretprobe_test");
-free:
-	kfree(buf);
-
 	goto out;
 }
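Both test functions leaked buf on the success path, which returned through out: without freeing it, while only the error paths went through free:. The fix funnels every exit through one label that frees the buffer. A compact userspace sketch of that single-exit cleanup pattern (names and build steps are illustrative, not the test module's API):

#include <errno.h>
#include <stdlib.h>

static int step_fails;	/* which build step should fail, for the demo */

static int gen_cmd_start(char *buf) { (void)buf; return step_fails == 1 ? -EINVAL : 0; }
static int gen_cmd_end(char *buf)   { (void)buf; return step_fails == 2 ? -EINVAL : 0; }

static int build_event(void)
{
	char *buf;
	int ret;

	buf = malloc(256);
	if (!buf)
		return -ENOMEM;

	ret = gen_cmd_start(buf);
	if (ret)
		goto out;	/* was "goto free" before the fix */

	ret = gen_cmd_end(buf);
	if (ret)
		goto out;

	/* success used to return here without freeing buf: the leak */
out:
	free(buf);	/* every path, success included, frees buf */
	return ret;
}

int main(void)
{
	step_fails = 0;
	return build_event();
}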
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
@@ -937,6 +937,9 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct rb_irq_work *rbwork;
 
+	if (!buffer)
+		return;
+
 	if (cpu == RING_BUFFER_ALL_CPUS) {
 
 		/* Wake up individual ones too. One level recursion */
@@ -945,7 +948,15 @@ void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu)
 
 		rbwork = &buffer->irq_work;
 	} else {
+		if (WARN_ON_ONCE(!buffer->buffers))
+			return;
+		if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+			return;
+
 		cpu_buffer = buffer->buffers[cpu];
+		/* The CPU buffer may not have been initialized yet */
+		if (!cpu_buffer)
+			return;
 		rbwork = &cpu_buffer->irq_work;
 	}
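The crash happened because buffer->buffers is allocated with nr_cpu_ids slots but only online CPUs get a ring_buffer_per_cpu, so indexing with an offline CPU's id dereferences NULL. A userspace sketch of the guard ordering, an array sized for the maximum id with only some slots initialized (all names invented):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPU_IDS 8	/* stand-in for nr_cpu_ids */

struct per_cpu_buffer {
	int waiters;
};

/* sized for every possible CPU, populated only for online ones */
static struct per_cpu_buffer *buffers[NR_CPU_IDS];

static void wake_waiters(int cpu)
{
	struct per_cpu_buffer *cpu_buffer;

	if (cpu < 0 || cpu >= NR_CPU_IDS)	/* index guard first */
		return;

	cpu_buffer = buffers[cpu];
	if (!cpu_buffer)	/* slot never initialized: offline CPU */
		return;

	printf("waking %d waiters on cpu %d\n", cpu_buffer->waiters, cpu);
}

int main(void)
{
	buffers[0] = calloc(1, sizeof(*buffers[0]));	/* only CPU 0 online */
	if (!buffers[0])
		return 1;

	wake_waiters(0);	/* ok */
	wake_waiters(5);	/* offline CPU: safely ignored now */
	free(buffers[0]);
	return 0;
}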