perf counters: clean up state transitions
Impact: cleanup

Introduce a proper enum for the 3 states of a counter:

        PERF_COUNTER_STATE_OFF          = -1
        PERF_COUNTER_STATE_INACTIVE     =  0
        PERF_COUNTER_STATE_ACTIVE       =  1

and rename counter->active to counter->state and propagate the changes
everywhere.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 6a930700c8
parent 1d1c7ddbfa
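Before the diff itself, a minimal userspace sketch of the state machine this patch wires up. The enum values and the transition points mirror the hunks below; the struct counter stand-in, the helper names and the main() driver are illustrative only, not kernel code:

#include <assert.h>
#include <stdio.h>

/* Same three states and numeric encoding as the patch introduces. */
enum perf_counter_active_state {
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

/* Illustrative stand-in for struct perf_counter. */
struct counter {
        enum perf_counter_active_state  state;
        int                             oncpu;
};

/*
 * Mirrors counter_sched_in(): OFF counters are skipped entirely,
 * which is what makes a prctl-style disable stick across context
 * switches until an explicit enable.
 */
static void sched_in(struct counter *c, int cpu)
{
        if (c->state == PERF_COUNTER_STATE_OFF)
                return;
        c->state = PERF_COUNTER_STATE_ACTIVE;
        c->oncpu = cpu;
}

/* Mirrors counter_sched_out(): only ACTIVE counters get torn down. */
static void sched_out(struct counter *c)
{
        if (c->state != PERF_COUNTER_STATE_ACTIVE)
                return;
        c->state = PERF_COUNTER_STATE_INACTIVE;
        c->oncpu = -1;
}

/*
 * Mirrors perf_counter_task_disable(): disabling expects the counter
 * to be scheduled out already (the patch WARNs; this sketch asserts).
 */
static void task_disable(struct counter *c)
{
        assert(c->state != PERF_COUNTER_STATE_ACTIVE);
        c->state = PERF_COUNTER_STATE_OFF;
}

/* Mirrors perf_counter_task_enable(): OFF goes back to INACTIVE. */
static void task_enable(struct counter *c)
{
        if (c->state != PERF_COUNTER_STATE_OFF)
                return;
        c->state = PERF_COUNTER_STATE_INACTIVE;
}

int main(void)
{
        struct counter c = { .state = PERF_COUNTER_STATE_INACTIVE, .oncpu = -1 };

        sched_in(&c, 0);
        assert(c.state == PERF_COUNTER_STATE_ACTIVE);

        sched_out(&c);
        task_disable(&c);

        sched_in(&c, 1);                /* skipped: counter is OFF */
        assert(c.state == PERF_COUNTER_STATE_OFF);

        task_enable(&c);
        sched_in(&c, 1);
        assert(c.state == PERF_COUNTER_STATE_ACTIVE);

        printf("state transitions behave as in the patch\n");
        return 0;
}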
arch/x86/kernel/cpu/perf_counter.c

@@ -332,7 +332,7 @@ perf_handle_group(struct perf_counter *sibling, u64 *status, u64 *overflown)
 	 * Then store sibling timestamps (if any):
 	 */
 	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
-		if (!counter->active) {
+		if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 			/*
 			 * When counter was not in the overflow mask, we have to
 			 * read it from hardware. We read it as well, when it
include/linux/perf_counter.h

@@ -127,6 +127,15 @@ struct hw_perf_counter_ops {
 	void (*hw_perf_counter_read)	(struct perf_counter *counter);
 };
 
+/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+	PERF_COUNTER_STATE_OFF		= -1,
+	PERF_COUNTER_STATE_INACTIVE	=  0,
+	PERF_COUNTER_STATE_ACTIVE	=  1,
+};
+
 /**
  * struct perf_counter - performance counter kernel representation:
  */
@@ -136,7 +145,7 @@ struct perf_counter {
 	struct perf_counter		*group_leader;
 	const struct hw_perf_counter_ops *hw_ops;
 
-	int				active;
+	enum perf_counter_active_state	state;
 #if BITS_PER_LONG == 64
 	atomic64_t			count;
 #else
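A side note on the hunks above (an observation, not something the commit message states): the enum constants keep the exact numeric encoding of the old int active field, -1/0/1, so values already stored in the field stay meaningful and only the type gets more expressive. A throwaway userspace check of that invariant, using C11 _Static_assert and redeclaring the enum from the hunk above:

enum perf_counter_active_state {
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

_Static_assert(PERF_COUNTER_STATE_OFF == -1, "matches old active == -1");
_Static_assert(PERF_COUNTER_STATE_INACTIVE == 0, "matches old active == 0");
_Static_assert(PERF_COUNTER_STATE_ACTIVE == 1, "matches old active == 1");

int main(void) { return 0; }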
kernel/perf_counter.c

@@ -167,9 +167,9 @@ static void __perf_counter_remove_from_context(void *info)
 
 	spin_lock(&ctx->lock);
 
-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		counter->hw_ops->hw_perf_counter_disable(counter);
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 		ctx->nr_active--;
 		cpuctx->active_oncpu--;
 		counter->task = NULL;
@@ -281,7 +281,7 @@ static void __perf_install_in_context(void *info)
 
 	if (cpuctx->active_oncpu < perf_max_counters) {
 		counter->hw_ops->hw_perf_counter_enable(counter);
-		counter->active = 1;
+		counter->state = PERF_COUNTER_STATE_ACTIVE;
 		counter->oncpu = cpu;
 		ctx->nr_active++;
 		cpuctx->active_oncpu++;
@@ -328,7 +328,6 @@ retry:
 	spin_lock_irq(&ctx->lock);
-
 	/*
 	 * If the context is active and the counter has not been added
 	 * we need to retry the smp call.
 	 */
 	if (ctx->nr_active && list_empty(&counter->list_entry)) {
@@ -353,12 +352,12 @@ counter_sched_out(struct perf_counter *counter,
 		  struct perf_cpu_context *cpuctx,
 		  struct perf_counter_context *ctx)
 {
-	if (!counter->active)
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
 		return;
 
 	counter->hw_ops->hw_perf_counter_disable(counter);
-	counter->active	=  0;
-	counter->oncpu	= -1;
+	counter->state	= PERF_COUNTER_STATE_INACTIVE;
+	counter->oncpu	= -1;
 
 	cpuctx->active_oncpu--;
 	ctx->nr_active--;
@@ -415,11 +414,11 @@ counter_sched_in(struct perf_counter *counter,
 		 struct perf_counter_context *ctx,
 		 int cpu)
 {
-	if (counter->active == -1)
+	if (counter->state == PERF_COUNTER_STATE_OFF)
 		return;
 
 	counter->hw_ops->hw_perf_counter_enable(counter);
-	counter->active = 1;
+	counter->state = PERF_COUNTER_STATE_ACTIVE;
 	counter->oncpu = cpu;	/* TODO: put 'cpu' into cpuctx->cpu */
 
 	cpuctx->active_oncpu++;
@@ -506,8 +505,8 @@ int perf_counter_task_disable(void)
 	perf_flags = hw_perf_save_disable();
 
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		WARN_ON_ONCE(counter->active == 1);
-		counter->active = -1;
+		WARN_ON_ONCE(counter->state == PERF_COUNTER_STATE_ACTIVE);
+		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 	hw_perf_restore(perf_flags);
 
@@ -540,9 +539,9 @@ int perf_counter_task_enable(void)
 	perf_flags = hw_perf_save_disable();
 
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
-		if (counter->active != -1)
+		if (counter->state != PERF_COUNTER_STATE_OFF)
 			continue;
-		counter->active = 0;
+		counter->state = PERF_COUNTER_STATE_INACTIVE;
 	}
 	hw_perf_restore(perf_flags);
 
@@ -620,7 +619,7 @@ static u64 perf_counter_read(struct perf_counter *counter)
 	 * If counter is enabled and currently active on a CPU, update the
 	 * value in the counter structure:
 	 */
-	if (counter->active) {
+	if (counter->state == PERF_COUNTER_STATE_ACTIVE) {
 		smp_call_function_single(counter->oncpu,
 					 __hw_perf_counter_read, counter, 1);
 	}
@@ -673,7 +672,7 @@ static struct perf_data *perf_switch_irq_data(struct perf_counter *counter)
 
 retry:
 	spin_lock_irq(&ctx->lock);
-	if (!counter->active) {
+	if (counter->state != PERF_COUNTER_STATE_ACTIVE) {
 		counter->irqdata = counter->usrdata;
 		counter->usrdata = oldirqdata;
 		spin_unlock_irq(&ctx->lock);