mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2025-01-24 23:04:17 +08:00
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar: "An ABI documentation fix, and a mixed-PMU perf-info-corruption fix" * 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: perf: Document the new transaction sample type perf: Disable all pmus on unthrottling and rescheduling
This commit is contained in:
commit
58cac3faef
@@ -679,6 +679,7 @@ enum perf_event_type {
	 *
	 *	{ u64			weight;   } && PERF_SAMPLE_WEIGHT
	 *	{ u64			data_src; } && PERF_SAMPLE_DATA_SRC
+	 *	{ u64			transaction; } && PERF_SAMPLE_TRANSACTION
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

+	perf_pmu_disable(event->pmu);
+
	event->state = PERF_EVENT_STATE_INACTIVE;
	if (event->pending_disable) {
		event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;
+
+	perf_pmu_enable(event->pmu);
 }

 static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
		 struct perf_event_context *ctx)
 {
	u64 tstamp = perf_event_time(event);
+	int ret = 0;

	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
	 */
	smp_wmb();

+	perf_pmu_disable(event->pmu);
+
	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto out;
	}

	event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

-	return 0;
+out:
+	perf_pmu_enable(event->pmu);
+
+	return ret;
 }

 static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
		if (!event_filter_match(event))
			continue;

+		perf_pmu_disable(event->pmu);
+
		hwc = &event->hw;

		if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
		}

		if (!event->attr.freq || !event->attr.sample_freq)
-			continue;
+			goto next;

		/*
		 * stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
		perf_adjust_period(event, period, delta, false);

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+	next:
+		perf_pmu_enable(event->pmu);
	}

	perf_pmu_enable(ctx->pmu);
Loading…
Reference in New Issue
Block a user