2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-19 02:34:01 +08:00

perf_counter: Add unique counter id

Stephane raised the issue that we currently cannot distinguish between
similar counters within a group (PERF_RECORD_GROUP uses the config
value as identifier).

Therefore, generate a new ID for each counter using a global u64
sequence counter.

Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
Peter Zijlstra 2009-06-02 15:08:15 +02:00 committed by Ingo Molnar
parent 53e111a730
commit 8e5799b1ad
2 changed files with 12 additions and 5 deletions

View File

@ -114,8 +114,9 @@ enum perf_counter_record_format {
* in increasing order of bit value, after the counter value. * in increasing order of bit value, after the counter value.
*/ */
enum perf_counter_read_format { enum perf_counter_read_format {
PERF_FORMAT_TOTAL_TIME_ENABLED = 1, PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
PERF_FORMAT_TOTAL_TIME_RUNNING = 2, PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
PERF_FORMAT_ID = 1U << 2,
}; };
/* /*
@ -290,7 +291,7 @@ enum perf_event_type {
* { u32 cpu, res; } && PERF_RECORD_CPU * { u32 cpu, res; } && PERF_RECORD_CPU
* *
* { u64 nr; * { u64 nr;
* { u64 event, val; } cnt[nr]; } && PERF_RECORD_GROUP * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
* *
* { u16 nr, * { u16 nr,
* hv, * hv,
@ -503,6 +504,7 @@ struct perf_counter {
struct rcu_head rcu_head; struct rcu_head rcu_head;
struct pid_namespace *ns; struct pid_namespace *ns;
u64 id;
#endif #endif
}; };

View File

@ -1510,6 +1510,8 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) if (counter->hw_event.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = counter->total_time_running + values[n++] = counter->total_time_running +
atomic64_read(&counter->child_total_time_running); atomic64_read(&counter->child_total_time_running);
if (counter->hw_event.read_format & PERF_FORMAT_ID)
values[n++] = counter->id;
mutex_unlock(&counter->child_mutex); mutex_unlock(&counter->child_mutex);
if (count < n * sizeof(u64)) if (count < n * sizeof(u64))
@ -2303,7 +2305,7 @@ static void perf_counter_output(struct perf_counter *counter,
u32 pid, tid; u32 pid, tid;
} tid_entry; } tid_entry;
struct { struct {
u64 event; u64 id;
u64 counter; u64 counter;
} group_entry; } group_entry;
struct perf_callchain_entry *callchain = NULL; struct perf_callchain_entry *callchain = NULL;
@ -2416,7 +2418,7 @@ static void perf_counter_output(struct perf_counter *counter,
if (sub != counter) if (sub != counter)
sub->pmu->read(sub); sub->pmu->read(sub);
group_entry.event = sub->hw_event.config; group_entry.id = sub->id;
group_entry.counter = atomic64_read(&sub->count); group_entry.counter = atomic64_read(&sub->count);
perf_output_put(&handle, group_entry); perf_output_put(&handle, group_entry);
@ -3375,6 +3377,8 @@ done:
return counter; return counter;
} }
static atomic64_t perf_counter_id;
/** /**
* sys_perf_counter_open - open a performance counter, associate it to a task/cpu * sys_perf_counter_open - open a performance counter, associate it to a task/cpu
* *
@ -3470,6 +3474,7 @@ SYSCALL_DEFINE5(perf_counter_open,
mutex_unlock(&current->perf_counter_mutex); mutex_unlock(&current->perf_counter_mutex);
counter->ns = get_pid_ns(current->nsproxy->pid_ns); counter->ns = get_pid_ns(current->nsproxy->pid_ns);
counter->id = atomic64_inc_return(&perf_counter_id);
fput_light(counter_file, fput_needed2); fput_light(counter_file, fput_needed2);