perf: Fix double start/stop in x86_pmu_start()
This patch fixes a bug introduced by commit e050e3f0a7
("perf: Fix broken interrupt rate throttling").

That commit caused the following warning to pop up depending on
the sampling frequency adjustments:

  ------------[ cut here ]------------
  WARNING: at arch/x86/kernel/cpu/perf_event.c:995 x86_pmu_start+0x79/0xd4()

It was caused by the following call sequence:

  perf_adjust_freq_unthr_context.part() {
      stop()
      if (delta > 0) {
           perf_adjust_period() {
               if (period > 8*...) {
                   stop()
                   ...
                   start()
               }
           }
      }
      start()
  }

Which caused a double start and a double stop, thus triggering
the assert in x86_pmu_start().

The patch fixes the problem by avoiding the double calls. We
pass a new argument to perf_adjust_period() to indicate whether
or not the event is already stopped. We can't just remove the
start/stop from that function because it's called from
__perf_event_overflow, where the event needs to be reloaded via
a stop/start back-to-back call.

The patch reintroduces the assertion in x86_pmu_start() which
was removed by commit 84f2b9b ("perf: Remove deprecated
WARN_ON_ONCE()").

In this second version, we've added calls to disable/enable the
PMU during unthrottling or frequency adjustment, based on a bug
report of spurious NMI interrupts from Eric Dumazet.

Reported-and-tested-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: Stephane Eranian <eranian@google.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: markus@trippelsdorf.de
Cc: paulus@samba.org
Link: http://lkml.kernel.org/r/20120207133956.GA4932@quad
[ Minor edits to the changelog and to the code ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 136e0b8eab
commit f39d47ff81
arch/x86/kernel/cpu/perf_event.c
@@ -986,6 +986,9 @@ static void x86_pmu_start(struct perf_event *event, int flags)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx = event->hw.idx;
 
+	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+		return;
+
 	if (WARN_ON_ONCE(idx == -1))
 		return;
 
kernel/events/core.c
@@ -2303,7 +2303,7 @@ do {					\
 static DEFINE_PER_CPU(int, perf_throttled_count);
 static DEFINE_PER_CPU(u64, perf_throttled_seq);
 
-static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
+static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
 {
 	struct hw_perf_event *hwc = &event->hw;
 	s64 period, sample_period;
@@ -2322,9 +2322,13 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		event->pmu->stop(event, PERF_EF_UPDATE);
+		if (disable)
+			event->pmu->stop(event, PERF_EF_UPDATE);
+
 		local64_set(&hwc->period_left, 0);
-		event->pmu->start(event, PERF_EF_RELOAD);
+
+		if (disable)
+			event->pmu->start(event, PERF_EF_RELOAD);
 	}
 }
 
@@ -2350,6 +2354,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		return;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 
 	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
 		if (event->state != PERF_EVENT_STATE_ACTIVE)
@@ -2381,13 +2386,17 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
 		/*
 		 * restart the event
 		 * reload only if value has changed
+		 * we have stopped the event so tell that
+		 * to perf_adjust_period() to avoid stopping it
+		 * twice.
 		 */
 		if (delta > 0)
-			perf_adjust_period(event, period, delta);
+			perf_adjust_period(event, period, delta, false);
 
 		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
 	}
 
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -4562,7 +4571,7 @@ static int __perf_event_overflow(struct perf_event *event,
 		hwc->freq_time_stamp = now;
 
 		if (delta > 0 && delta < 2*TICK_NSEC)
-			perf_adjust_period(event, delta, hwc->last_period);
+			perf_adjust_period(event, delta, hwc->last_period, true);
 	}
 
 	/*
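For reference, this is roughly how perf_adjust_period() reads once the
patch is applied. The lines the hunks above do not show (the call to
perf_calculate_period() and the low-pass filter) are reconstructed from
the kernel sources of that era and should be treated as approximate
context, not as part of this diff:

  static void perf_adjust_period(struct perf_event *event, u64 nsec,
  			       u64 count, bool disable)
  {
  	struct hw_perf_event *hwc = &event->hw;
  	s64 period, sample_period;
  	s64 delta;

  	/* Reconstructed context: estimate the period for the target rate. */
  	period = perf_calculate_period(event, nsec, count);

  	delta = (s64)(period - hwc->sample_period);
  	delta = (delta + 7) / 8; /* low-pass filter */

  	sample_period = hwc->sample_period + delta;

  	if (!sample_period)
  		sample_period = 1;

  	hwc->sample_period = sample_period;

  	if (local64_read(&hwc->period_left) > 8*sample_period) {
  		/* Only stop/start here when the caller has not already
  		 * stopped the event (the __perf_event_overflow() path). */
  		if (disable)
  			event->pmu->stop(event, PERF_EF_UPDATE);

  		local64_set(&hwc->period_left, 0);

  		if (disable)
  			event->pmu->start(event, PERF_EF_RELOAD);
  	}
  }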