perf: Cleanup {start,commit,cancel}_txn details
Clarify some of the transactional group scheduling API details and change
it so that a successful ->commit_txn also closes the transaction.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1274803086.5882.1752.camel@twins>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3af9e85928
commit 8d2cacbbb8
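In short: ->start_txn() opens a transaction during which ->enable() skips its
per-event schedulability test; ->commit_txn() runs one test for the whole
group and, after this patch, a successful commit also clears the transaction
state, so the caller only needs ->cancel_txn() on failure. A minimal
standalone model of that protocol (fake_pmu and every helper below are
illustrative inventions, not kernel code; only the flag and rollback
bookkeeping mirror the patch):

        #include <stdio.h>

        #define PERF_EVENT_TXN 0x1

        struct fake_pmu {
                unsigned int group_flag;
                int n_events;
                int n_txn_start;
                int max_events;
        };

        static void start_txn(struct fake_pmu *pmu)
        {
                pmu->group_flag |= PERF_EVENT_TXN;      /* enable() may skip its test */
                pmu->n_txn_start = pmu->n_events;       /* remember where the txn began */
        }

        static int commit_txn(struct fake_pmu *pmu)
        {
                if (pmu->n_events > pmu->max_events)    /* one test for the whole group */
                        return -1;                      /* failure keeps the txn open */
                pmu->group_flag &= ~PERF_EVENT_TXN;     /* success closes the txn */
                return 0;
        }

        static void cancel_txn(struct fake_pmu *pmu)
        {
                pmu->n_events = pmu->n_txn_start;       /* truncate events added in the txn */
                pmu->group_flag &= ~PERF_EVENT_TXN;
        }

        static int group_sched_in(struct fake_pmu *pmu, int group_size)
        {
                start_txn(pmu);
                pmu->n_events += group_size;            /* add group events as a whole */

                if (!commit_txn(pmu))
                        return 0;                       /* no cancel_txn() on success */

                cancel_txn(pmu);                        /* one rollback for the group */
                return -1;
        }

        int main(void)
        {
                struct fake_pmu pmu = { 0, 0, 0, 4 };

                printf("group of 3: %d, events now %d\n", group_sched_in(&pmu, 3), pmu.n_events);
                printf("group of 9: %d, events now %d\n", group_sched_in(&pmu, 9), pmu.n_events);
                return 0;
        }

Running it schedules a group of 3 successfully, then rolls a group of 9 back
to the pre-transaction state.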
arch/powerpc/kernel/perf_event.c

@@ -754,7 +754,7 @@ static int power_pmu_enable(struct perf_event *event)
          * skip the schedulability test here, it will be peformed
          * at commit time(->commit_txn) as a whole
          */
-        if (cpuhw->group_flag & PERF_EVENT_TXN_STARTED)
+        if (cpuhw->group_flag & PERF_EVENT_TXN)
                 goto nocheck;
 
         if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
@@ -858,7 +858,7 @@ void power_pmu_start_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-        cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+        cpuhw->group_flag |= PERF_EVENT_TXN;
         cpuhw->n_txn_start = cpuhw->n_events;
 }
 
@@ -871,7 +871,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-        cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+        cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -897,6 +897,7 @@ int power_pmu_commit_txn(const struct pmu *pmu)
         for (i = cpuhw->n_txn_start; i < n; ++i)
                 cpuhw->event[i]->hw.config = cpuhw->events[i];
 
+        cpuhw->group_flag &= ~PERF_EVENT_TXN;
         return 0;
 }
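Worth noting in the powerpc hunks: ->start_txn() records n_txn_start so that
a later ->commit_txn() knows which events joined during the transaction (only
those get their hw.config finalized) and a rollback knows where to truncate;
the standalone model after the commit message above borrows exactly this
bookkeeping.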
arch/sparc/kernel/perf_event.c

@@ -1005,7 +1005,7 @@ static int sparc_pmu_enable(struct perf_event *event)
          * skip the schedulability test here, it will be peformed
          * at commit time(->commit_txn) as a whole
          */
-        if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+        if (cpuc->group_flag & PERF_EVENT_TXN)
                 goto nocheck;
 
         if (check_excludes(cpuc->event, n0, 1))
@@ -1102,7 +1102,7 @@ static void sparc_pmu_start_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-        cpuhw->group_flag |= PERF_EVENT_TXN_STARTED;
+        cpuhw->group_flag |= PERF_EVENT_TXN;
 }
 
 /*
@@ -1114,7 +1114,7 @@ static void sparc_pmu_cancel_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
-        cpuhw->group_flag &= ~PERF_EVENT_TXN_STARTED;
+        cpuhw->group_flag &= ~PERF_EVENT_TXN;
 }
 
 /*
@@ -1137,6 +1137,7 @@ static int sparc_pmu_commit_txn(const struct pmu *pmu)
         if (sparc_check_constraints(cpuc->event, cpuc->events, n))
                 return -EAGAIN;
 
+        cpuc->group_flag &= ~PERF_EVENT_TXN;
         return 0;
 }
arch/x86/kernel/cpu/perf_event.c

@@ -969,7 +969,7 @@ static int x86_pmu_enable(struct perf_event *event)
          * skip the schedulability test here, it will be peformed
          * at commit time(->commit_txn) as a whole
          */
-        if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+        if (cpuc->group_flag & PERF_EVENT_TXN)
                 goto out;
 
         ret = x86_pmu.schedule_events(cpuc, n, assign);
@@ -1096,7 +1096,7 @@ static void x86_pmu_disable(struct perf_event *event)
          * The events never got scheduled and ->cancel_txn will truncate
          * the event_list.
          */
-        if (cpuc->group_flag & PERF_EVENT_TXN_STARTED)
+        if (cpuc->group_flag & PERF_EVENT_TXN)
                 return;
 
         x86_pmu_stop(event);
@@ -1388,7 +1388,7 @@ static void x86_pmu_start_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        cpuc->group_flag |= PERF_EVENT_TXN_STARTED;
+        cpuc->group_flag |= PERF_EVENT_TXN;
         cpuc->n_txn = 0;
 }
 
@@ -1401,7 +1401,7 @@ static void x86_pmu_cancel_txn(const struct pmu *pmu)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-        cpuc->group_flag &= ~PERF_EVENT_TXN_STARTED;
+        cpuc->group_flag &= ~PERF_EVENT_TXN;
         /*
          * Truncate the collected events.
          */
@@ -1435,11 +1435,7 @@ static int x86_pmu_commit_txn(const struct pmu *pmu)
          */
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
-        /*
-         * Clear out the txn count so that ->cancel_txn() which gets
-         * run after ->commit_txn() doesn't undo things.
-         */
-        cpuc->n_txn = 0;
+        cpuc->group_flag &= ~PERF_EVENT_TXN;
 
         return 0;
 }
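Note the x86 ->commit_txn() hunk: previously a successful commit was still
followed by a ->cancel_txn() call from the core (the old code removed in the
kernel/perf_event.c hunk below did exactly that), so commit had to zero n_txn
to keep that later cancel from truncating the event list. With a successful
commit now closing the transaction itself, clearing PERF_EVENT_TXN suffices.
For orientation, a sketch of the rollback that ->cancel_txn() performs; the
two truncation lines are an assumption extrapolated from the "Truncate the
collected events" comment above, not shown in this diff:

        static void x86_pmu_cancel_txn(const struct pmu *pmu)
        {
                struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

                cpuc->group_flag &= ~PERF_EVENT_TXN;
                /*
                 * Truncate the collected events: forget whatever this
                 * transaction had added to the event list (assumed fields).
                 */
                cpuc->n_added -= cpuc->n_txn;
                cpuc->n_events -= cpuc->n_txn;
        }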
include/linux/perf_event.h

@@ -549,7 +549,10 @@ struct hw_perf_event {
 
 struct perf_event;
 
-#define PERF_EVENT_TXN_STARTED 1
+/*
+ * Common implementation detail of pmu::{start,commit,cancel}_txn
+ */
+#define PERF_EVENT_TXN 0x1
 
 /**
  * struct pmu - generic performance monitoring unit
@@ -563,14 +566,28 @@ struct pmu {
         void (*unthrottle)              (struct perf_event *event);
 
         /*
-         * group events scheduling is treated as a transaction,
-         * add group events as a whole and perform one schedulability test.
-         * If test fails, roll back the whole group
+         * Group events scheduling is treated as a transaction, add group
+         * events as a whole and perform one schedulability test. If the test
+         * fails, roll back the whole group
          */
+
+        /*
+         * Start the transaction, after this ->enable() doesn't need
+         * to do schedulability tests.
+         */
         void (*start_txn)       (const struct pmu *pmu);
-        void (*cancel_txn)      (const struct pmu *pmu);
+        /*
+         * If ->start_txn() disabled the ->enable() schedulability test
+         * then ->commit_txn() is required to perform one. On success
+         * the transaction is closed. On error the transaction is kept
+         * open until ->cancel_txn() is called.
+         */
         int  (*commit_txn)      (const struct pmu *pmu);
+        /*
+         * Will cancel the transaction, assumes ->disable() is called for
+         * each successfull ->enable() during the transaction.
+         */
+        void (*cancel_txn)      (const struct pmu *pmu);
 };
 
 /**
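Taken together, the new comments pin down the contract every pmu must
implement. A hypothetical skeleton of a conforming driver; cpu_hw_events and
__get_cpu_var() are borrowed from the arch code in this diff, and
events_schedulable() is a stand-in for whatever architecture-specific
constraint check applies:

        static void dummy_pmu_start_txn(const struct pmu *pmu)
        {
                struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

                /* Open the txn; ->enable() now skips its per-event test. */
                cpuhw->group_flag |= PERF_EVENT_TXN;
        }

        static int dummy_pmu_commit_txn(const struct pmu *pmu)
        {
                struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

                /* One schedulability test for the whole group. */
                if (!events_schedulable(cpuhw))
                        return -EAGAIN;         /* failure keeps the txn open */

                cpuhw->group_flag &= ~PERF_EVENT_TXN;   /* success closes it */
                return 0;
        }

        static void dummy_pmu_cancel_txn(const struct pmu *pmu)
        {
                struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

                /* Roll back whatever the open txn added, then close it. */
                cpuhw->group_flag &= ~PERF_EVENT_TXN;
        }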
kernel/perf_event.c

@@ -675,7 +675,6 @@ group_sched_in(struct perf_event *group_event,
         struct perf_event *event, *partial_group = NULL;
         const struct pmu *pmu = group_event->pmu;
         bool txn = false;
-        int ret;
 
         if (group_event->state == PERF_EVENT_STATE_OFF)
                 return 0;
@@ -703,15 +702,9 @@ group_sched_in(struct perf_event *group_event,
                 }
         }
 
-        if (!txn)
+        if (!txn || !pmu->commit_txn(pmu))
                 return 0;
 
-        ret = pmu->commit_txn(pmu);
-        if (!ret) {
-                pmu->cancel_txn(pmu);
-                return 0;
-        }
-
 group_error:
         /*
          * Groups can be scheduled in as one unit only, so undo any
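The tail of this last hunk is cut off by the page, but the new condition
carries the whole point of the patch: return 0 either because no transaction
was used (!txn) or because ->commit_txn() returned 0, in which case the
transaction is already closed and, unlike the deleted code, no ->cancel_txn()
call is needed on the success path. A failed commit falls through to
group_error, where the partially enabled events are undone and ->cancel_txn()
rolls back the still-open transaction. That deleted cancel-after-successful-
commit is also exactly why x86's old ->commit_txn() had to zero n_txn, and
why that workaround can be dropped in the same patch.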