/*
 * Performance counters:
 *
 *  Copyright(C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008, Red Hat, Inc., Ingo Molnar
 *
 * Data type definitions, declarations, prototypes.
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licencing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_COUNTER_H
#define _LINUX_PERF_COUNTER_H

#include <asm/atomic.h>

#ifdef CONFIG_PERF_COUNTERS
# include <asm/perf_counter.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct task_struct;

/*
 * User-space ABI bits:
 */

/*
 * Generalized performance counter event types, used by the hw_event.type
 * parameter of the sys_perf_counter_open() syscall:
 */
enum hw_event_types {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_CPU_CYCLES		= 0,
	PERF_COUNT_INSTRUCTIONS		= 1,
	PERF_COUNT_CACHE_REFERENCES	= 2,
	PERF_COUNT_CACHE_MISSES		= 3,
	PERF_COUNT_BRANCH_INSTRUCTIONS	= 4,
	PERF_COUNT_BRANCH_MISSES	= 5,
	PERF_COUNT_BUS_CYCLES		= 6,

	PERF_HW_EVENTS_MAX		= 7,

	/*
	 * Special "software" counters provided by the kernel, even if
	 * the hardware does not support performance counters. These
	 * counters measure various physical and sw events of the
	 * kernel (and allow the profiling of them as well):
	 */
	PERF_COUNT_CPU_CLOCK		= -1,
	PERF_COUNT_TASK_CLOCK		= -2,
	PERF_COUNT_PAGE_FAULTS		= -3,
	PERF_COUNT_CONTEXT_SWITCHES	= -4,
	PERF_COUNT_CPU_MIGRATIONS	= -5,

	PERF_SW_EVENTS_MIN		= -6,
};

/*
 * IRQ-notification data record type:
 */
enum perf_counter_record_type {
	PERF_RECORD_SIMPLE		= 0,
	PERF_RECORD_IRQ			= 1,
	PERF_RECORD_GROUP		= 2,
};

/*
 * Hardware event to monitor via a performance monitoring counter:
 */
struct perf_counter_hw_event {
	s64			type;

	u64			irq_period;
	u32			record_type;

	u32			disabled     :  1, /* off by default        */
				nmi	     :  1, /* NMI sampling          */
				raw	     :  1, /* raw event type        */
				inherit	     :  1, /* children inherit it   */
				pinned	     :  1, /* must always be on PMU */
				exclusive    :  1, /* only counter on PMU   */

				__reserved_1 : 26;

	u64			__reserved_2;
};
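
/*
 * Illustrative sketch (not part of this header): user space is expected to
 * fill in a struct perf_counter_hw_event and pass it to the
 * sys_perf_counter_open() syscall, then read() the 64-bit counter value
 * from the returned fd. The exact argument list of the syscall shown below
 * is an assumption for illustration only; see the syscall implementation
 * for the authoritative prototype.
 *
 *	struct perf_counter_hw_event hw_event = {
 *		.type		= PERF_COUNT_INSTRUCTIONS,
 *		.record_type	= PERF_RECORD_SIMPLE,
 *		.disabled	= 0,
 *	};
 *	u64 count;
 *	int fd;
 *
 *	fd = sys_perf_counter_open(&hw_event, pid, cpu, group_fd, flags);
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */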

/*
 * Kernel-internal data types:
 */

/**
 * struct hw_perf_counter - performance counter hardware details:
 */
struct hw_perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	u64				config;
	unsigned long			config_base;
	unsigned long			counter_base;
	int				nmi;
	unsigned int			idx;
	atomic64_t			prev_count;
	u64				irq_period;
	atomic64_t			period_left;
#endif
};

/*
 * Hardcoded buffer length limit for now, for IRQ-fed events:
 */
#define PERF_DATA_BUFLEN		2048

/**
 * struct perf_data - performance counter IRQ data sampling ...
 */
struct perf_data {
	int				len;
	int				rd_idx;
	int				overrun;
	u8				data[PERF_DATA_BUFLEN];
};

struct perf_counter;

/**
 * struct hw_perf_counter_ops - performance counter hw ops
 */
struct hw_perf_counter_ops {
	int	(*enable)		(struct perf_counter *counter);
	void	(*disable)		(struct perf_counter *counter);
	void	(*read)			(struct perf_counter *counter);
};
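
/*
 * Illustrative sketch (not part of this header): an architecture backend is
 * expected to return a (typically statically allocated) hw_perf_counter_ops
 * instance from its hw_perf_counter_init() implementation, declared further
 * below. The function and variable names here are hypothetical placeholders.
 *
 *	static int  arch_counter_enable(struct perf_counter *counter)  { ... }
 *	static void arch_counter_disable(struct perf_counter *counter) { ... }
 *	static void arch_counter_read(struct perf_counter *counter)    { ... }
 *
 *	static const struct hw_perf_counter_ops arch_counter_ops = {
 *		.enable		= arch_counter_enable,
 *		.disable	= arch_counter_disable,
 *		.read		= arch_counter_read,
 *	};
 */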

/**
 * enum perf_counter_active_state - the states of a counter
 */
enum perf_counter_active_state {
	PERF_COUNTER_STATE_ERROR	= -2,
	PERF_COUNTER_STATE_OFF		= -1,
	PERF_COUNTER_STATE_INACTIVE	=  0,
	PERF_COUNTER_STATE_ACTIVE	=  1,
};

struct file;

/**
 * struct perf_counter - performance counter kernel representation:
 */
struct perf_counter {
#ifdef CONFIG_PERF_COUNTERS
	struct list_head		list_entry;
	struct list_head		sibling_list;
	struct perf_counter		*group_leader;
	const struct hw_perf_counter_ops *hw_ops;

	enum perf_counter_active_state	state;
	atomic64_t			count;

	struct perf_counter_hw_event	hw_event;
	struct hw_perf_counter		hw;

	struct perf_counter_context	*ctx;
	struct task_struct		*task;
	struct file			*filp;

	struct perf_counter		*parent;
	/*
	 * Protect attach/detach:
	 */
	struct mutex			mutex;

	int				oncpu;
	int				cpu;

	/* read() / irq related data */
	wait_queue_head_t		waitq;
	/* optional: for NMIs */
	int				wakeup_pending;
	struct perf_data		*irqdata;
	struct perf_data		*usrdata;
	struct perf_data		data[2];
#endif
};

/**
 * struct perf_counter_context - counter context structure
 *
 * Used as a container for task counters and CPU counters as well:
 */
struct perf_counter_context {
#ifdef CONFIG_PERF_COUNTERS
	/*
	 * Protect the list of counters:
	 */
	spinlock_t		lock;

	struct list_head	counter_list;
	int			nr_counters;
	int			nr_active;
	struct task_struct	*task;
#endif
};

/**
 * struct perf_cpu_context - per cpu counter context structure
 */
struct perf_cpu_context {
	struct perf_counter_context	ctx;
	struct perf_counter_context	*task_ctx;
	int				active_oncpu;
	int				max_pertask;
	int				exclusive;
};

/*
 * Set by architecture code:
 */
extern int perf_max_counters;

#ifdef CONFIG_PERF_COUNTERS
extern const struct hw_perf_counter_ops *
hw_perf_counter_init(struct perf_counter *counter);

extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
extern void perf_counter_task_sched_out(struct task_struct *task, int cpu);
extern void perf_counter_task_tick(struct task_struct *task, int cpu);
extern void perf_counter_init_task(struct task_struct *child);
extern void perf_counter_exit_task(struct task_struct *child);
extern void perf_counter_notify(struct pt_regs *regs);
extern void perf_counter_print_debug(void);
extern u64 hw_perf_save_disable(void);
extern void hw_perf_restore(u64 ctrl);
extern int perf_counter_task_disable(void);
extern int perf_counter_task_enable(void);
extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu);
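
/*
 * Illustrative sketch (an assumption, not mandated by this header): callers
 * that need to manipulate counter state consistently with respect to the
 * PMU typically bracket the update with the save-disable/restore pair
 * declared above:
 *
 *	u64 perf_flags;
 *
 *	perf_flags = hw_perf_save_disable();
 *	... update counter lists / state ...
 *	hw_perf_restore(perf_flags);
 */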

/*
 * Return 1 for a software counter, 0 for a hardware counter
 */
static inline int is_software_counter(struct perf_counter *counter)
{
	return !counter->hw_event.raw && counter->hw_event.type < 0;
}

#else
static inline void
perf_counter_task_sched_in(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_sched_out(struct task_struct *task, int cpu)		{ }
static inline void
perf_counter_task_tick(struct task_struct *task, int cpu)		{ }
static inline void perf_counter_init_task(struct task_struct *child)	{ }
static inline void perf_counter_exit_task(struct task_struct *child)	{ }
static inline void perf_counter_notify(struct pt_regs *regs)		{ }
static inline void perf_counter_print_debug(void)			{ }
static inline void hw_perf_restore(u64 ctrl)				{ }
static inline u64 hw_perf_save_disable(void)		      { return 0; }
static inline int perf_counter_task_disable(void)	{ return -EINVAL; }
static inline int perf_counter_task_enable(void)	{ return -EINVAL; }
#endif

#endif /* _LINUX_PERF_COUNTER_H */