
perf, x86: Add simple group validation

Refuse to add events when the group wouldn't fit onto the PMU
anymore.

Naive implementation.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@gmail.com>
LKML-Reference: <1254911461.26976.239.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Commit:    fe9081cc9b
Parent:    b690081d4d
Author:    Peter Zijlstra, 2009-10-08 11:56:07 +02:00
Committer: Ingo Molnar

--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c

@@ -114,7 +114,8 @@ struct x86_pmu {
 	u64		intel_ctrl;
 	void		(*enable_bts)(u64 config);
 	void		(*disable_bts)(void);
-	int		(*get_event_idx)(struct hw_perf_event *hwc);
+	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
+					 struct hw_perf_event *hwc);
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
@@ -523,7 +524,7 @@ static u64 intel_pmu_raw_event(u64 hw_event)
 #define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
 #define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
 #define CORE_EVNTSEL_INV_MASK		0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK	0xFF000000ULL
+#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL
 
 #define CORE_EVNTSEL_MASK		\
 	(CORE_EVNTSEL_EVENT_MASK |	\
@@ -1390,8 +1391,7 @@ static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		x86_pmu_enable_event(hwc, idx);
 }
 
-static int
-fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
+static int fixed_mode_idx(struct hw_perf_event *hwc)
 {
 	unsigned int hw_event;
 
@@ -1424,9 +1424,9 @@ fixed_mode_idx(struct perf_event *event, struct hw_perf_event *hwc)
 /*
  * generic counter allocator: get next free counter
  */
-static int gen_get_event_idx(struct hw_perf_event *hwc)
+static int
+gen_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	int idx;
 
 	idx = find_first_zero_bit(cpuc->used_mask, x86_pmu.num_events);
@@ -1436,16 +1436,16 @@ static int gen_get_event_idx(struct hw_perf_event *hwc)
 /*
  * intel-specific counter allocator: check event constraints
  */
-static int intel_get_event_idx(struct hw_perf_event *hwc)
+static int
+intel_get_event_idx(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	const struct event_constraint *event_constraint;
 	int i, code;
 
 	if (!event_constraints)
 		goto skip;
 
-	code = hwc->config & 0xff;
+	code = hwc->config & CORE_EVNTSEL_EVENT_MASK;
 
 	for_each_event_constraint(event_constraint, event_constraints) {
 		if (code == event_constraint->code) {
@@ -1457,26 +1457,22 @@ static int intel_get_event_idx(struct hw_perf_event *hwc)
 		}
 	}
 skip:
-	return gen_get_event_idx(hwc);
+	return gen_get_event_idx(cpuc, hwc);
 }
 
-/*
- * Find a PMC slot for the freshly enabled / scheduled in event:
- */
-static int x86_pmu_enable(struct perf_event *event)
+static int
+x86_schedule_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
 {
-	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-	struct hw_perf_event *hwc = &event->hw;
 	int idx;
 
-	idx = fixed_mode_idx(event, hwc);
+	idx = fixed_mode_idx(hwc);
 	if (idx == X86_PMC_IDX_FIXED_BTS) {
 		/* BTS is already occupied. */
 		if (test_and_set_bit(idx, cpuc->used_mask))
 			return -EAGAIN;
 
 		hwc->config_base	= 0;
 		hwc->event_base		= 0;
 		hwc->idx		= idx;
 	} else if (idx >= 0) {
 		/*
@@ -1499,17 +1495,33 @@ static int x86_pmu_enable(struct perf_event *event)
 		/* Try to get the previous generic event again */
 		if (idx == -1 || test_and_set_bit(idx, cpuc->used_mask)) {
 try_generic:
-			idx = x86_pmu.get_event_idx(hwc);
+			idx = x86_pmu.get_event_idx(cpuc, hwc);
 			if (idx == -1)
 				return -EAGAIN;
 
 			set_bit(idx, cpuc->used_mask);
 			hwc->idx = idx;
 		}
 		hwc->config_base  = x86_pmu.eventsel;
 		hwc->event_base   = x86_pmu.perfctr;
 	}
 
+	return idx;
+}
+
+/*
+ * Find a PMC slot for the freshly enabled / scheduled in event:
+ */
+static int x86_pmu_enable(struct perf_event *event)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx;
+
+	idx = x86_schedule_event(cpuc, hwc);
+	if (idx < 0)
+		return idx;
+
 	perf_events_lapic_init();
 
 	x86_pmu.disable(hwc, idx);
@@ -2212,11 +2224,47 @@ static const struct pmu pmu = {
 	.unthrottle	= x86_pmu_unthrottle,
 };
 
+static int
+validate_event(struct cpu_hw_events *cpuc, struct perf_event *event)
+{
+	struct hw_perf_event fake_event = event->hw;
+
+	if (event->pmu != &pmu)
+		return 0;
+
+	return x86_schedule_event(cpuc, &fake_event);
+}
+
+static int validate_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	struct cpu_hw_events fake_pmu;
+
+	memset(&fake_pmu, 0, sizeof(fake_pmu));
+
+	if (!validate_event(&fake_pmu, leader))
+		return -ENOSPC;
+
+	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
+		if (!validate_event(&fake_pmu, sibling))
+			return -ENOSPC;
+	}
+
+	if (!validate_event(&fake_pmu, event))
+		return -ENOSPC;
+
+	return 0;
+}
+
 const struct pmu *hw_perf_event_init(struct perf_event *event)
 {
 	int err;
 
 	err = __hw_perf_event_init(event);
+	if (!err) {
+		if (event->group_leader != event)
+			err = validate_group(event);
+	}
 	if (err) {
 		if (event->destroy)
 			event->destroy(event);
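
Taken together, the change validates a group by replaying its scheduling against a zeroed, throwaway cpu_hw_events before any event touches real hardware. Below is a minimal, self-contained C sketch of that dry-run pattern, not kernel code: the fake_used_mask bitmap, NUM_COUNTERS constant, and helper names are hypothetical stand-ins for cpuc->used_mask, x86_pmu.num_events, and the functions added above.

#include <stdio.h>

#define NUM_COUNTERS 4	/* stand-in for x86_pmu.num_events */

/*
 * Dry-run allocator: grab the first free counter in a fake used-mask,
 * loosely mirroring what gen_get_event_idx() does with cpuc->used_mask.
 */
static int schedule_event(unsigned int *used_mask)
{
	for (int idx = 0; idx < NUM_COUNTERS; idx++) {
		if (!(*used_mask & (1u << idx))) {
			*used_mask |= 1u << idx;
			return idx;
		}
	}
	return -1;	/* no free counter: the group would not fit */
}

/*
 * Replay an n-event group against a zeroed fake PMU, the way
 * validate_group() walks the leader, the siblings, and the new event.
 */
static int validate_group(int n_events)
{
	unsigned int fake_used_mask = 0;	/* like memset(&fake_pmu, 0, ...) */

	for (int i = 0; i < n_events; i++) {
		if (schedule_event(&fake_used_mask) < 0)
			return -1;	/* the kernel returns -ENOSPC here */
	}
	return 0;
}

int main(void)
{
	printf("group of 4 events: %s\n", validate_group(4) ? "rejected" : "fits");
	printf("group of 5 events: %s\n", validate_group(5) ? "rejected" : "fits");
	return 0;
}

The key design point is that the dry run mutates only the local fake state, so a rejected group leaves the real per-CPU counter bookkeeping untouched.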