
perf/x86/intel/cstate: Convert Intel CSTATE to hotplug state machine

Install the callbacks via the state machine and let the core invoke
the callbacks on the already online CPUs.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@suse.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: kbuild test robot <fengguang.wu@intel.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160713153334.184061086@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Author: Sebastian Andrzej Siewior, 2016-07-13 17:16:18 +00:00; committed by Ingo Molnar
commit 77c34ef1c3 (parent f070482704)
2 changed files with 18 additions and 37 deletions
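
Before the diff, a quick orientation: the conversion replaces the open-coded CPU notifier with two hotplug states, one in the STARTING section and one in the ONLINE section of the state machine. The fragment below is only a condensed sketch of that pattern using the commit's state names; the callback bodies are stubbed out, the init/exit function names are hypothetical, and the return values of cpuhp_setup_state() are ignored just as in the patch. It is not the driver itself.

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hotplug callbacks take the CPU number and return 0 or a negative errno. */
static int cstate_cpu_init(unsigned int cpu) { return 0; }	/* stub; real body in the diff */
static int cstate_cpu_exit(unsigned int cpu) { return 0; }	/* stub; real body in the diff */

static int __init sketch_init(void)
{
	/*
	 * Installing a state registers the callbacks and lets the core invoke
	 * the startup callback on every CPU that is already online, which is
	 * what replaces the old for_each_online_cpu() loop in cstate_init().
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init, NULL);
	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
	return 0;
}

static void __exit sketch_exit(void)
{
	/* Remove the states in reverse order; _nocalls skips the teardown callbacks. */
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
}

module_init(sketch_init);
module_exit(sketch_exit);
MODULE_LICENSE("GPL");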

arch/x86/events/intel/cstate.c

@@ -365,7 +365,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode)
  * Check if exiting cpu is the designated reader. If so migrate the
  * events when there is a valid target available
  */
-static void cstate_cpu_exit(int cpu)
+static int cstate_cpu_exit(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -390,9 +390,10 @@ static void cstate_cpu_exit(int cpu)
 			perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target);
 		}
 	}
+	return 0;
 }
 
-static void cstate_cpu_init(int cpu)
+static int cstate_cpu_init(unsigned int cpu)
 {
 	unsigned int target;
 
@@ -414,31 +415,10 @@ static void cstate_cpu_init(int cpu)
 			    topology_core_cpumask(cpu));
 	if (has_cstate_pkg && target >= nr_cpu_ids)
 		cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask);
+	return 0;
 }
 
-static int cstate_cpu_notifier(struct notifier_block *self,
-			       unsigned long action, void *hcpu)
-{
-	unsigned int cpu = (long)hcpu;
-
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_STARTING:
-		cstate_cpu_init(cpu);
-		break;
-	case CPU_DOWN_PREPARE:
-		cstate_cpu_exit(cpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cstate_cpu_nb = {
-	.notifier_call	= cstate_cpu_notifier,
-	.priority	= CPU_PRI_PERF + 1,
-};
-
 static struct pmu cstate_core_pmu = {
 	.attr_groups	= core_attr_groups,
 	.name		= "cstate_core",
@@ -599,18 +579,20 @@ static inline void cstate_cleanup(void)
 
 static int __init cstate_init(void)
 {
-	int cpu, err;
+	int err;
 
-	cpu_notifier_register_begin();
-	for_each_online_cpu(cpu)
-		cstate_cpu_init(cpu);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING,
+			  "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init,
+			  NULL);
+	cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE,
+			  "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit);
 
 	if (has_cstate_core) {
 		err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1);
 		if (err) {
 			has_cstate_core = false;
 			pr_info("Failed to register cstate core pmu\n");
-			goto out;
+			return err;
 		}
 	}
 
@@ -620,12 +602,10 @@ static int __init cstate_init(void)
 			has_cstate_pkg = false;
 			pr_info("Failed to register cstate pkg pmu\n");
 			cstate_cleanup();
-			goto out;
+			return err;
 		}
 	}
-	__register_cpu_notifier(&cstate_cpu_nb);
 
-out:
-	cpu_notifier_register_done();
 	return err;
 }
 
@@ -651,9 +631,8 @@ module_init(cstate_pmu_init);
 static void __exit cstate_pmu_exit(void)
 {
-	cpu_notifier_register_begin();
-	__unregister_cpu_notifier(&cstate_cpu_nb);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE);
+	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING);
 	cstate_cleanup();
-	cpu_notifier_register_done();
 }
 
 module_exit(cstate_pmu_exit);

include/linux/cpuhotplug.h

@@ -26,6 +26,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
 	CPUHP_AP_PERF_X86_CQM_STARTING,
+	CPUHP_AP_PERF_X86_CSTATE_STARTING,
 	CPUHP_AP_NOTIFY_STARTING,
 	CPUHP_AP_ONLINE,
 	CPUHP_TEARDOWN_CPU,
@@ -38,6 +39,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE,
 	CPUHP_AP_PERF_X86_RAPL_ONLINE,
 	CPUHP_AP_PERF_X86_CQM_ONLINE,
+	CPUHP_AP_PERF_X86_CSTATE_ONLINE,
 	CPUHP_AP_NOTIFY_ONLINE,
 	CPUHP_AP_ONLINE_DYN,
 	CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
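
A side note on the enum above, as a hedged illustration rather than anything this commit does: a fixed entry such as CPUHP_AP_PERF_X86_CSTATE_ONLINE pins the callbacks at a specific point in the bring-up/tear-down order, which is why the enum has to grow here. Drivers without ordering requirements can instead request a slot from the dynamic range that closes the list (CPUHP_AP_ONLINE_DYN). The callback and state names below are hypothetical.

#include <linux/cpuhotplug.h>

/* Hypothetical callbacks, for illustration only. */
static int example_online(unsigned int cpu)  { return 0; }
static int example_offline(unsigned int cpu) { return 0; }

static int example_register(void)
{
	/*
	 * With CPUHP_AP_ONLINE_DYN the core picks a free slot between
	 * CPUHP_AP_ONLINE_DYN and CPUHP_AP_ONLINE_DYN_END; on success the
	 * allocated state number is returned so it can later be handed to
	 * cpuhp_remove_state().
	 */
	int state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_EXAMPLE_ONLINE",
				      example_online, example_offline);
	return state < 0 ? state : 0;
}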