/*
 * Performance counters:
 *
 *  Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 * Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * hw_event.type
 */
enum perf_event_types {
        PERF_TYPE_HARDWARE              = 0,
        PERF_TYPE_SOFTWARE              = 1,
        PERF_TYPE_TRACEPOINT            = 2,

        /*
         * available TYPE space, raw is the max value.
         */

        PERF_TYPE_RAW                   = 128,
};

/*
 * Generalized performance counter event types, used by the hw_event.event_id
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_ids {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_CPU_CYCLES           = 0,
        PERF_COUNT_INSTRUCTIONS         = 1,
        PERF_COUNT_CACHE_REFERENCES     = 2,
        PERF_COUNT_CACHE_MISSES         = 3,
        PERF_COUNT_BRANCH_INSTRUCTIONS  = 4,
        PERF_COUNT_BRANCH_MISSES        = 5,
        PERF_COUNT_BUS_CYCLES           = 6,

        PERF_HW_EVENTS_MAX              = 7,
};

/*
 * Special "software" counters provided by the kernel, even if the hardware
 * does not support performance counters. These counters measure various
 * physical and sw events of the kernel (and allow the profiling of them as
 * well):
 */
enum sw_event_ids {
        PERF_COUNT_CPU_CLOCK            = 0,
        PERF_COUNT_TASK_CLOCK           = 1,
        PERF_COUNT_PAGE_FAULTS          = 2,
        PERF_COUNT_CONTEXT_SWITCHES     = 3,
        PERF_COUNT_CPU_MIGRATIONS       = 4,
        PERF_COUNT_PAGE_FAULTS_MIN      = 5,
        PERF_COUNT_PAGE_FAULTS_MAJ      = 6,

        PERF_SW_EVENTS_MAX              = 7,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
        PERF_RECORD_SIMPLE              = 0,
        PERF_RECORD_IRQ                 = 1,
        PERF_RECORD_GROUP               = 2,
};

#define __PERF_COUNTER_MASK(name)                       \
        (((1ULL << PERF_COUNTER_##name##_BITS) - 1) <<  \
         PERF_COUNTER_##name##_SHIFT)

#define PERF_COUNTER_RAW_BITS           1
#define PERF_COUNTER_RAW_SHIFT          63
#define PERF_COUNTER_RAW_MASK           __PERF_COUNTER_MASK(RAW)

#define PERF_COUNTER_CONFIG_BITS        63
#define PERF_COUNTER_CONFIG_SHIFT       0
#define PERF_COUNTER_CONFIG_MASK        __PERF_COUNTER_MASK(CONFIG)

#define PERF_COUNTER_TYPE_BITS          7
#define PERF_COUNTER_TYPE_SHIFT         56
#define PERF_COUNTER_TYPE_MASK          __PERF_COUNTER_MASK(TYPE)

#define PERF_COUNTER_EVENT_BITS         56
#define PERF_COUNTER_EVENT_SHIFT        0
#define PERF_COUNTER_EVENT_MASK         __PERF_COUNTER_MASK(EVENT)

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
        /*
         * The MSB of the config word signifies if the rest contains cpu
         * specific (raw) counter configuration data; if unset, the next
         * 7 bits are an event type and the rest of the bits are the event
         * identifier.
         */
        __u64                   config;

        __u64                   irq_period;
        __u64                   record_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                nmi            :  1, /* NMI sampling          */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */

                                __reserved_1   : 55;

        __u32                   extra_config_len;
        __u32                   __reserved_4;

        __u64                   __reserved_2;
        __u64                   __reserved_3;
};
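
/*
 * Illustrative sketch (not part of the original header): composing a
 * hw_event.config value from the bit layout described above, using the
 * PERF_COUNTER_*_SHIFT/_MASK definitions. The helper names are made up
 * for illustration only. For example, a generalized software counter for
 * context switches would combine PERF_TYPE_SOFTWARE with
 * PERF_COUNT_CONTEXT_SWITCHES, while a raw counter sets the MSB and
 * carries a cpu specific value in the low 63 bits.
 */
static inline __u64 perf_example_mk_config(__u64 type, __u64 event_id)
{
        return ((type << PERF_COUNTER_TYPE_SHIFT) & PERF_COUNTER_TYPE_MASK) |
               (event_id & PERF_COUNTER_EVENT_MASK);
}

static inline __u64 perf_example_mk_raw_config(__u64 raw)
{
        return PERF_COUNTER_RAW_MASK | (raw & PERF_COUNTER_CONFIG_MASK);
}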

/*
 * Ioctls that can be done on a perf counter fd:
 */
#define PERF_COUNTER_IOC_ENABLE         _IO('$', 0)
#define PERF_COUNTER_IOC_DISABLE        _IO('$', 1)
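
/*
 * User-space usage sketch (illustrative only, compiled out): enabling and
 * disabling a counter through its file descriptor. Assumes an fd obtained
 * from the sys_perf_counter_open() syscall and <sys/ioctl.h> in a real
 * program; error handling is omitted.
 */
#if 0
void example_toggle_counter(int counter_fd)
{
        ioctl(counter_fd, PERF_COUNTER_IOC_DISABLE);    /* stop counting   */
        /* ... run the uninteresting part of the workload ... */
        ioctl(counter_fd, PERF_COUNTER_IOC_ENABLE);     /* resume counting */
}
#endif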

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_counter_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware counter identifier */
        __s64   offset;                 /* add to hardware counter value */

        __u32   data_head;              /* head in the data section */
};
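
/*
 * User-space usage sketch (illustrative only, compiled out): reading a
 * counter through the mmap()ed page. The lock field acts as a seqlock:
 * sample it before and after reading and retry if it changed. How the
 * hardware counter selected by 'index' is read (e.g. RDPMC on x86), and
 * the exact meaning of 'index', are architecture specific assumptions
 * here; read_hw_counter() is a hypothetical helper.
 */
#if 0
__u64 example_mmap_read(volatile struct perf_counter_mmap_page *pg)
{
        __u32 seq;
        __s64 count;

        do {
                seq = pg->lock;
                __asm__ __volatile__("" ::: "memory");  /* compiler barrier */

                count = pg->offset + read_hw_counter(pg->index);

                __asm__ __volatile__("" ::: "memory");
        } while (pg->lock != seq);

        return count;
}
#endif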

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <asm/atomic.h>

struct task_struct;

static inline u64 perf_event_raw(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_RAW_MASK;
}

static inline u64 perf_event_config(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_CONFIG_MASK;
}

static inline u64 perf_event_type(struct perf_counter_hw_event *hw_event)
{
        return (hw_event->config & PERF_COUNTER_TYPE_MASK) >>
                PERF_COUNTER_TYPE_SHIFT;
}

static inline u64 perf_event_id(struct perf_counter_hw_event *hw_event)
{
        return hw_event->config & PERF_COUNTER_EVENT_MASK;
}

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        union {
                struct { /* hardware */
                        u64             config;
                        unsigned long   config_base;
                        unsigned long   counter_base;
                        int             nmi;
                        unsigned int    idx;
                };
                union { /* software */
                        atomic64_t      count;
                        struct hrtimer  hrtimer;
                };
        };
        atomic64_t              prev_count;
        u64                     irq_period;
        atomic64_t              period_left;
#endif
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
        int (*enable)           (struct perf_counter *counter);
        void (*disable)         (struct perf_counter *counter);
        void (*read)            (struct perf_counter *counter);
};
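
/*
 * Illustrative sketch (compiled out, not part of the original header): how a
 * counter implementation wires its callbacks into a hw_perf_counter_ops
 * table, which hw_perf_counter_init() then returns for the counter. The
 * example_* names and empty bodies are placeholders; a real implementation
 * would program and read the PMU (or a software clock) here.
 */
#if 0
static int example_counter_enable(struct perf_counter *counter)
{
        /* start the hardware counter / hrtimer for this counter */
        return 0;
}

static void example_counter_disable(struct perf_counter *counter)
{
        /* stop counting and fold the current value into the counter */
}

static void example_counter_read(struct perf_counter *counter)
{
        /* update the counter value from the hardware counter */
}

static const struct hw_perf_counter_ops example_counter_ops = {
        .enable         = example_counter_enable,
        .disable        = example_counter_disable,
        .read           = example_counter_read,
};
#endif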

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
        PERF_COUNTER_STATE_ERROR        = -2,
        PERF_COUNTER_STATE_OFF          = -1,
        PERF_COUNTER_STATE_INACTIVE     =  0,
        PERF_COUNTER_STATE_ACTIVE       =  1,
};

struct file;

struct perf_mmap_data {
        struct rcu_head                 rcu_head;
        int                             nr_pages;
        atomic_t                        wakeup;
        atomic_t                        head;
        struct perf_counter_mmap_page   *user_page;
        void                            *data_pages[0];
};

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
        struct list_head                list_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct perf_counter             *group_leader;
        const struct hw_perf_counter_ops *hw_ops;

        enum perf_counter_active_state  state;
        enum perf_counter_active_state  prev_state;
        atomic64_t                      count;

        struct perf_counter_hw_event    hw_event;
        struct hw_perf_counter          hw;

        struct perf_counter_context     *ctx;
        struct task_struct              *task;
        struct file                     *filp;

        struct perf_counter             *parent;
        struct list_head                child_list;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    mutex;

        int                             oncpu;
        int                             cpu;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        struct perf_mmap_data           *data;

        /* poll related */
        wait_queue_head_t               waitq;
        /* optional: for NMIs */
        int                             wakeup_pending;

        void (*destroy)(struct perf_counter *);
        struct rcu_head                 rcu_head;
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
        /*
         * Protect the states of the counters in the list,
         * nr_active, and the list:
         */
        spinlock_t              lock;
        /*
         * Protect the list of counters. Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex            mutex;

        struct list_head        counter_list;
        struct list_head        event_list;
        int                     nr_counters;
        int                     nr_active;
        int                     is_active;
        struct task_struct      *task;
#endif
};
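
/*
 * Illustrative sketch (compiled out, not part of the original header) of the
 * locking rule documented above: holding either ctx->mutex or ctx->lock is
 * enough to walk counter_list, while modifying the list requires both. The
 * example_* functions are hypothetical callers.
 */
#if 0
static void example_walk_counters(struct perf_counter_context *ctx)
{
        struct perf_counter *counter;

        mutex_lock(&ctx->mutex);                /* either lock pins the list */
        list_for_each_entry(counter, &ctx->counter_list, list_entry) {
                /* counters cannot be added or removed here */
        }
        mutex_unlock(&ctx->mutex);
}

static void example_add_counter(struct perf_counter_context *ctx,
                                struct perf_counter *counter)
{
        mutex_lock(&ctx->mutex);                /* changing the list needs */
        spin_lock_irq(&ctx->lock);              /* both the mutex and lock */
        list_add_tail(&counter->list_entry, &ctx->counter_list);
        spin_unlock_irq(&ctx->lock);
        mutex_unlock(&ctx->mutex);
}
#endif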

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
        struct perf_counter_context     ctx;
        struct perf_counter_context     *task_ctx;
        int                             active_oncpu;
        int                             max_pertask;
        int                             exclusive;

        /*
         * Recursion avoidance:
         *
         * task, softirq, irq, nmi context
         */
        int                             recursion[4];
};
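
/*
 * Illustrative sketch (compiled out): the recursion[] array keeps one
 * reentrancy flag per execution context (task, softirq, irq, nmi), so an
 * event firing from within the counter code itself can be dropped instead
 * of recursing. The mapping from the current execution state to an index
 * is only roughed out here; NMI detection is architecture specific and
 * assumed, and the example_* helper names are made up.
 */
#if 0
static int example_swcounter_context(int in_nmi_context)
{
        if (in_nmi_context)
                return 3;
        if (in_irq())
                return 2;
        if (in_softirq())
                return 1;
        return 0;               /* plain task context */
}

static int example_enter(struct perf_cpu_context *cpuctx, int rctx)
{
        if (cpuctx->recursion[rctx])
                return -1;      /* already inside, drop the event */
        cpuctx->recursion[rctx]++;
        return 0;
}

static void example_exit(struct perf_cpu_context *cpuctx, int rctx)
{
        cpuctx->recursion[rctx]--;
}
#endif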

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern void perf_counter_unthrottle(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
                                  struct perf_cpu_context *cpuctx,
                                  struct perf_counter_context *ctx, int cpu);
extern void perf_counter_update_userpage(struct perf_counter *counter);

extern void perf_counter_output(struct perf_counter *counter,
                                int nmi, struct pt_regs *regs);

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
        return !perf_event_raw(&counter->hw_event) &&
                perf_event_type(&counter->hw_event) != PERF_TYPE_HARDWARE;
}

extern void perf_swcounter_event(u32, u64, int, struct pt_regs *);
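
/*
 * Usage sketch (compiled out): how kernel code reports a software event to
 * the perf_counter core. The argument names follow the !CONFIG_PERF_COUNTERS
 * stub below (event, nr, nmi, regs); the call site shown here is only an
 * assumption for illustration.
 */
#if 0
static void example_count_context_switch(struct pt_regs *regs)
{
        /* one PERF_COUNT_CONTEXT_SWITCHES event, not from NMI context */
        perf_swcounter_event(PERF_COUNT_CONTEXT_SWITCHES, 1, 0, regs);
}
#endif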

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)           { }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)          { }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)               { }
static inline void perf_counter_init_task(struct task_struct *child)    { }
static inline void perf_counter_exit_task(struct task_struct *child)    { }
static inline void perf_counter_notify(struct pt_regs *regs)            { }
static inline void perf_counter_print_debug(void)                       { }
static inline void perf_counter_unthrottle(void)                        { }
static inline void hw_perf_restore(u64 ctrl)                            { }
static inline u64 hw_perf_save_disable(void)                  { return 0; }
static inline int perf_counter_task_disable(void)       { return -EINVAL; }
static inline int perf_counter_task_enable(void)        { return -EINVAL; }

static inline void perf_swcounter_event(u32 event, u64 nr,
                                        int nmi, struct pt_regs *regs)  { }
#endif

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_COUNTER_H */