perf_events: Fix __perf_event_exit_task() vs. update_event_times() locking
Move the update_event_times() call in __perf_event_exit_task() into
list_del_event() because that holds the proper lock (ctx->lock) and seems
a more natural place to do the last time update.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20091123103819.842455480@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent 5e942bb333
commit f67218c3e9
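To make the commit message's locking argument concrete, here is a minimal user-space C sketch (not the kernel's code or call path: the structures are stand-ins, a pthread mutex plays the role of ctx->lock, and __perf_event_exit_task() is simplified to take the lock directly rather than going through perf_event_remove_from_context() as the real kernel does). The point it illustrates is that update_event_times() reads ctx->time, so the final time update belongs in list_del_event(), which its callers reach with ctx->lock held.

/*
 * Hypothetical, simplified stand-ins for the kernel structures involved;
 * names mirror the patch, but this is illustrative user-space code only.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct ctx {
	pthread_mutex_t lock;	/* stands in for ctx->lock               */
	uint64_t time;		/* context time; only read under the lock */
};

struct event {
	struct ctx *ctx;
	uint64_t tstamp_enabled;
	uint64_t total_time_enabled;
};

/* Reads ctx->time, so it must run with ctx->lock held. */
static void update_event_times(struct event *event)
{
	event->total_time_enabled = event->ctx->time - event->tstamp_enabled;
}

/*
 * After the patch: the last time update happens here, where callers
 * already hold ctx->lock, just before the event is unlinked.
 */
static void list_del_event(struct event *event, struct ctx *ctx)
{
	update_event_times(event);
	/* ... unlink the event from the context's lists ... */
	(void)ctx;
}

/*
 * Before the patch this function called update_event_times() directly,
 * without holding ctx->lock; now the update is done under the lock,
 * inside list_del_event().  (The real kernel reaches list_del_event()
 * via perf_event_remove_from_context(); the lock is taken directly
 * here for brevity.)
 */
static void __perf_event_exit_task(struct event *child_event)
{
	struct ctx *ctx = child_event->ctx;

	pthread_mutex_lock(&ctx->lock);
	list_del_event(child_event, ctx);
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ctx ctx = { .lock = PTHREAD_MUTEX_INITIALIZER, .time = 100 };
	struct event ev = { .ctx = &ctx, .tstamp_enabled = 40 };

	__perf_event_exit_task(&ev);
	printf("total_time_enabled = %llu\n",
	       (unsigned long long)ev.total_time_enabled);
	return 0;
}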
@@ -246,6 +246,44 @@ static void perf_unpin_context(struct perf_event_context *ctx)
 	put_ctx(ctx);
 }
 
+static inline u64 perf_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
+/*
+ * Update the record of the current time in a context.
+ */
+static void update_context_time(struct perf_event_context *ctx)
+{
+	u64 now = perf_clock();
+
+	ctx->time += now - ctx->timestamp;
+	ctx->timestamp = now;
+}
+
+/*
+ * Update the total_time_enabled and total_time_running fields for a event.
+ */
+static void update_event_times(struct perf_event *event)
+{
+	struct perf_event_context *ctx = event->ctx;
+	u64 run_end;
+
+	if (event->state < PERF_EVENT_STATE_INACTIVE ||
+	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
+		return;
+
+	event->total_time_enabled = ctx->time - event->tstamp_enabled;
+
+	if (event->state == PERF_EVENT_STATE_INACTIVE)
+		run_end = event->tstamp_stopped;
+	else
+		run_end = ctx->time;
+
+	event->total_time_running = run_end - event->tstamp_running;
+}
+
 /*
  * Add a event from the lists for its context.
  * Must be called with ctx->mutex and ctx->lock held.
@@ -294,6 +332,7 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	if (event->group_leader != event)
 		event->group_leader->nr_siblings--;
 
+	update_event_times(event);
 	event->state = PERF_EVENT_STATE_OFF;
 
 	/*
@@ -454,44 +493,6 @@ retry:
 	spin_unlock_irq(&ctx->lock);
 }
 
-static inline u64 perf_clock(void)
-{
-	return cpu_clock(smp_processor_id());
-}
-
-/*
- * Update the record of the current time in a context.
- */
-static void update_context_time(struct perf_event_context *ctx)
-{
-	u64 now = perf_clock();
-
-	ctx->time += now - ctx->timestamp;
-	ctx->timestamp = now;
-}
-
-/*
- * Update the total_time_enabled and total_time_running fields for a event.
- */
-static void update_event_times(struct perf_event *event)
-{
-	struct perf_event_context *ctx = event->ctx;
-	u64 run_end;
-
-	if (event->state < PERF_EVENT_STATE_INACTIVE ||
-	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
-		return;
-
-	event->total_time_enabled = ctx->time - event->tstamp_enabled;
-
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		run_end = event->tstamp_stopped;
-	else
-		run_end = ctx->time;
-
-	event->total_time_running = run_end - event->tstamp_running;
-}
-
 /*
  * Update total_time_enabled and total_time_running for all events in a group.
  */
@@ -4931,7 +4932,6 @@ __perf_event_exit_task(struct perf_event *child_event,
 {
 	struct perf_event *parent_event;
 
-	update_event_times(child_event);
 	perf_event_remove_from_context(child_event);
 
 	parent_event = child_event->parent;