perf: Enqueue SIGTRAP always via task_work.
A signal is delivered by raising irq_work() which works from any context
including NMI. irq_work() can be delayed if the architecture does not
provide an interrupt vector. In order not to lose a signal, the signal is
injected via task_work during event_sched_out().

Instead of going via irq_work, the signal could be added directly via
task_work. The signal is sent to current and can be enqueued on its
return path to userland.

Queue the signal via task_work and consider possible NMI context. Remove
perf_event::pending_sigtrap and use perf_event::pending_work instead.

Reported-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Marco Elver <elver@google.com>
Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: https://lore.kernel.org/r/20240704170424.1466941-4-bigeasy@linutronix.de
commit c5d93d23a2
parent 466e4d801c
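For orientation, a condensed sketch of the enqueue pattern the patch adopts in __perf_event_overflow() (see the last two hunks of the diff below): the signal is queued on current as task_work, with the notify mode chosen by context. The helper name queue_sigtrap_work() is illustrative only, not a function added by the patch; the fields and flags follow the patch.

#include <linux/perf_event.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/task_work.h>

/*
 * Sketch only: queue SIGTRAP delivery on current's return path to
 * userland instead of raising irq_work. queue_sigtrap_work() is an
 * illustrative helper, not part of the patch itself.
 */
static void queue_sigtrap_work(struct perf_event *event,
			       unsigned int pending_id)
{
	/*
	 * TWA_NMI_CURRENT uses a self-IPI so the queued work is noticed
	 * even when the overflow interrupt fired in NMI context;
	 * otherwise the plain return-to-user notification suffices.
	 */
	enum task_work_notify_mode notify_mode =
		in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;

	/*
	 * At most one pending work item per event. task_work_add() fails
	 * if current is already exiting; in that case nothing is queued
	 * and the accounting is left untouched.
	 */
	if (!event->pending_work &&
	    !task_work_add(current, &event->pending_task, notify_mode)) {
		event->pending_work = pending_id;
		local_inc(&event->ctx->nr_pending);
	}
}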
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -781,7 +781,6 @@ struct perf_event {
 	unsigned int			pending_wakeup;
 	unsigned int			pending_kill;
 	unsigned int			pending_disable;
-	unsigned int			pending_sigtrap;
 	unsigned long			pending_addr;	/* SIGTRAP */
 	struct irq_work			pending_irq;
 	struct callback_head		pending_task;
@@ -963,7 +962,7 @@ struct perf_event_context {
 	struct rcu_head			rcu_head;
 
 	/*
-	 * Sum (event->pending_sigtrap + event->pending_work)
+	 * Sum (event->pending_work + event->pending_work)
 	 *
 	 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
 	 * that until the signal is delivered.
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2283,17 +2283,6 @@ event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 		state = PERF_EVENT_STATE_OFF;
 	}
 
-	if (event->pending_sigtrap) {
-		event->pending_sigtrap = 0;
-		if (state != PERF_EVENT_STATE_OFF &&
-		    !event->pending_work &&
-		    !task_work_add(current, &event->pending_task, TWA_RESUME)) {
-			event->pending_work = 1;
-		} else {
-			local_dec(&event->ctx->nr_pending);
-		}
-	}
-
 	perf_event_set_state(event, state);
 
 	if (!is_software_event(event))
@@ -6776,11 +6765,6 @@ static void __perf_pending_irq(struct perf_event *event)
 	 * Yay, we hit home and are in the context of the event.
 	 */
 	if (cpu == smp_processor_id()) {
-		if (event->pending_sigtrap) {
-			event->pending_sigtrap = 0;
-			perf_sigtrap(event);
-			local_dec(&event->ctx->nr_pending);
-		}
 		if (event->pending_disable) {
 			event->pending_disable = 0;
 			perf_event_disable_local(event);
@@ -9721,21 +9705,26 @@ static int __perf_event_overflow(struct perf_event *event,
 		 */
 		bool valid_sample = sample_is_allowed(event, regs);
 		unsigned int pending_id = 1;
+		enum task_work_notify_mode notify_mode;
 
 		if (regs)
 			pending_id = hash32_ptr((void *)instruction_pointer(regs)) ?: 1;
-		if (!event->pending_sigtrap) {
-			event->pending_sigtrap = pending_id;
+
+		notify_mode = in_nmi() ? TWA_NMI_CURRENT : TWA_RESUME;
+
+		if (!event->pending_work &&
+		    !task_work_add(current, &event->pending_task, notify_mode)) {
+			event->pending_work = pending_id;
 			local_inc(&event->ctx->nr_pending);
+
 			event->pending_addr = 0;
 			if (valid_sample && (data->sample_flags & PERF_SAMPLE_ADDR))
 				event->pending_addr = data->addr;
-			irq_work_queue(&event->pending_irq);
+
 		} else if (event->attr.exclude_kernel && valid_sample) {
 			/*
 			 * Should not be able to return to user space without
-			 * consuming pending_sigtrap; with exceptions:
+			 * consuming pending_work; with exceptions:
 			 *
 			 * 1. Where !exclude_kernel, events can overflow again
 			 *    in the kernel without returning to user space.
@@ -9745,7 +9734,7 @@ static int __perf_event_overflow(struct perf_event *event,
 			 * To approximate progress (with false negatives),
 			 * check 32-bit hash of the current IP.
 			 */
-			WARN_ON_ONCE(event->pending_sigtrap != pending_id);
+			WARN_ON_ONCE(event->pending_work != pending_id);
 		}
 	}
 