perf/x86/intel/uncore: Make package handling more robust

The package management code in uncore relies on package mapping being
available before a CPU is started. This changed with:

  9d85eb9119 ("x86/smpboot: Make logical package management more robust")

because the ACPI/BIOS information turned out to be unreliable, but the change
left uncore in a broken state. This went unnoticed because on a regular boot
all CPUs are online before uncore is initialized.

Move the allocation to the CPU online callback and simplify the hotplug
handling. At this point the package mapping is established and correct.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Stephane Eranian <eranian@google.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Yasuaki Ishimatsu <yasu.isimatu@gmail.com>
Fixes: 9d85eb9119 ("x86/smpboot: Make logical package management more robust")
Link: http://lkml.kernel.org/r/20170131230141.377156255@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit fff4b87e59 (parent 1aa6cfd33d)
Author:    Thomas Gleixner <tglx@linutronix.de>, 2017-01-31 23:58:40 +01:00
Committer: Ingo Molnar <mingo@kernel.org>
2 changed files, 91 additions(+), 107 deletions(-)
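
The rework replaces the three-state prepare/starting/online scheme with a single
CPUHP_AP_PERF_X86_UNCORE_ONLINE state whose callbacks run in process context,
after the package mapping is established. As a minimal sketch of that callback
pattern, assuming nothing beyond the generic cpuhp API (the demo_* names and the
dynamic CPUHP_AP_ONLINE_DYN slot are illustrative, not part of this patch):

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static enum cpuhp_state demo_state;

/* Runs in process context once @cpu is fully online, so the
 * topology (package) mapping is already valid here. */
static int demo_cpu_online(unsigned int cpu)
{
	pr_info("demo: cpu %u online\n", cpu);
	return 0;
}

static int demo_cpu_offline(unsigned int cpu)
{
	pr_info("demo: cpu %u going down\n", cpu);
	return 0;
}

static int __init demo_init(void)
{
	int ret;

	/* The online callback is also invoked for every CPU that is
	 * already online at registration time. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
				demo_cpu_online, demo_cpu_offline);
	if (ret < 0)
		return ret;
	demo_state = ret;	/* dynamic slot picked by the core */
	return 0;
}

static void __exit demo_exit(void)
{
	/* Unlike the _nocalls() variant, this invokes the offline
	 * callback on all online CPUs before removing the state. */
	cpuhp_remove_state(demo_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");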

diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c

@@ -100,7 +100,13 @@ ssize_t uncore_event_show(struct kobject *kobj,
 
 struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu)
 {
-	return pmu->boxes[topology_logical_package_id(cpu)];
+	unsigned int pkgid = topology_logical_package_id(cpu);
+
+	/*
+	 * The unsigned check also catches the '-1' return value for non
+	 * existent mappings in the topology map.
+	 */
+	return pkgid < max_packages ? pmu->boxes[pkgid] : NULL;
 }
 
 u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
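
The unsigned comparison above is doing double duty: topology_logical_package_id()
returns -1 for a CPU with no valid mapping, and once that value lands in an
unsigned int it wraps to UINT_MAX, so the single bounds check rejects it along
with any genuinely out-of-range id. A standalone userspace illustration (the
max_packages value is made up for the demo):

#include <stdio.h>

int main(void)
{
	unsigned int max_packages = 4;		/* made-up demo value */
	unsigned int pkgid = (unsigned int)-1;	/* the "no mapping" sentinel */

	/* -1 wraps to UINT_MAX, so this prints "invalid" */
	printf("%s\n", pkgid < max_packages ? "valid" : "invalid");
	return 0;
}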
@@ -1034,76 +1040,6 @@ static void uncore_pci_exit(void)
 	}
 }
 
-static int uncore_cpu_dying(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (box && atomic_dec_return(&box->refcnt) == 0)
-				uncore_box_exit(box);
-		}
-	}
-	return 0;
-}
-
-static int uncore_cpu_starting(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			box = pmu->boxes[pkg];
-			if (!box)
-				continue;
-			/* The first cpu on a package activates the box */
-			if (atomic_inc_return(&box->refcnt) == 1)
-				uncore_box_init(box);
-		}
-	}
-	return 0;
-}
-
-static int uncore_cpu_prepare(unsigned int cpu)
-{
-	struct intel_uncore_type *type, **types = uncore_msr_uncores;
-	struct intel_uncore_pmu *pmu;
-	struct intel_uncore_box *box;
-	int i, pkg;
-
-	pkg = topology_logical_package_id(cpu);
-	for (; *types; types++) {
-		type = *types;
-		pmu = type->pmus;
-		for (i = 0; i < type->num_boxes; i++, pmu++) {
-			if (pmu->boxes[pkg])
-				continue;
-			/* First cpu of a package allocates the box */
-			box = uncore_alloc_box(type, cpu_to_node(cpu));
-			if (!box)
-				return -ENOMEM;
-			box->pmu = pmu;
-			box->pkgid = pkg;
-			pmu->boxes[pkg] = box;
-		}
-	}
-	return 0;
-}
-
 static void uncore_change_type_ctx(struct intel_uncore_type *type, int old_cpu,
 				   int new_cpu)
 {
@@ -1143,12 +1079,14 @@ static void uncore_change_context(struct intel_uncore_type **uncores,
 
 static int uncore_event_cpu_offline(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, pkg, target;
 
 	/* Check if exiting cpu is used for collecting uncore events */
 	if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask))
-		return 0;
+		goto unref;
 
 	/* Find a new cpu to collect uncore events */
 	target = cpumask_any_but(topology_core_cpumask(cpu), cpu);
@@ -1160,12 +1098,82 @@ static int uncore_event_cpu_offline(unsigned int cpu)
 	uncore_change_context(uncore_msr_uncores, cpu, target);
 	uncore_change_context(uncore_pci_uncores, cpu, target);
 
+unref:
+	/* Clear the references */
+	pkg = topology_logical_package_id(cpu);
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_dec_return(&box->refcnt) == 0)
+				uncore_box_exit(box);
+		}
+	}
 	return 0;
 }
 
+static int allocate_boxes(struct intel_uncore_type **types,
+			  unsigned int pkg, unsigned int cpu)
+{
+	struct intel_uncore_box *box, *tmp;
+	struct intel_uncore_type *type;
+	struct intel_uncore_pmu *pmu;
+	LIST_HEAD(allocated);
+	int i;
+
+	/* Try to allocate all required boxes */
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			if (pmu->boxes[pkg])
+				continue;
+			box = uncore_alloc_box(type, cpu_to_node(cpu));
+			if (!box)
+				goto cleanup;
+			box->pmu = pmu;
+			box->pkgid = pkg;
+			list_add(&box->active_list, &allocated);
+		}
+	}
+	/* Install them in the pmus */
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		box->pmu->boxes[pkg] = box;
+	}
+	return 0;
+
+cleanup:
+	list_for_each_entry_safe(box, tmp, &allocated, active_list) {
+		list_del_init(&box->active_list);
+		kfree(box);
+	}
+	return -ENOMEM;
+}
+
 static int uncore_event_cpu_online(unsigned int cpu)
 {
-	int target;
+	struct intel_uncore_type *type, **types = uncore_msr_uncores;
+	struct intel_uncore_pmu *pmu;
+	struct intel_uncore_box *box;
+	int i, ret, pkg, target;
+
+	pkg = topology_logical_package_id(cpu);
+	ret = allocate_boxes(types, pkg, cpu);
+	if (ret)
+		return ret;
+
+	for (; *types; types++) {
+		type = *types;
+		pmu = type->pmus;
+		for (i = 0; i < type->num_boxes; i++, pmu++) {
+			box = pmu->boxes[pkg];
+			if (box && atomic_inc_return(&box->refcnt) == 1)
+				uncore_box_init(box);
+		}
+	}
 
 	/*
 	 * Check if there is an online cpu in the package
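
Worth noting about allocate_boxes() above: it is deliberately two-phase. All
missing boxes are first allocated onto a local list, and only when every
allocation has succeeded are they installed in pmu->boxes[]; on failure the
local list is freed and nothing is published, so other CPUs can never observe
a half-populated package. The box's active_list member is simply reused as
temporary linkage while the box is still private to this function.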
@@ -1355,33 +1363,13 @@ static int __init intel_uncore_init(void)
 	if (cret && pret)
 		return -ENODEV;
 
-	/*
-	 * Install callbacks. Core will call them for each online cpu.
-	 *
-	 * The first online cpu of each package allocates and takes
-	 * the refcounts for all other online cpus in that package.
-	 * If msrs are not enabled no allocation is required and
-	 * uncore_cpu_prepare() is not called for each online cpu.
-	 */
-	if (!cret) {
-		ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP,
-					"perf/x86/intel/uncore:prepare",
-					uncore_cpu_prepare, NULL);
-		if (ret)
-			goto err;
-	} else {
-		cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP,
-					  "perf/x86/intel/uncore:prepare",
-					  uncore_cpu_prepare, NULL);
-	}
-
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING,
-			  "perf/x86/uncore:starting",
-			  uncore_cpu_starting, uncore_cpu_dying);
-	cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
-			  "perf/x86/uncore:online",
-			  uncore_event_cpu_online, uncore_event_cpu_offline);
+	/* Install hotplug callbacks to setup the targets for each package */
+	ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE,
+				"perf/x86/intel/uncore:online",
+				uncore_event_cpu_online,
+				uncore_event_cpu_offline);
+	if (ret)
+		goto err;
 	return 0;
 
 err:
@@ -1393,9 +1381,7 @@ module_init(intel_uncore_init);
 
 static void __exit intel_uncore_exit(void)
 {
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
-	cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING);
-	cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP);
+	cpuhp_remove_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE);
 	uncore_types_exit(uncore_msr_uncores);
 	uncore_pci_exit();
 }
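
Note the matching simplification in the exit path: with only one hotplug state
left, cpuhp_remove_state() replaces the _nocalls() variants. Unlike those, it
invokes the registered offline callback on each online CPU, so the box
refcounts are dropped and uncore_box_exit() runs before uncore_types_exit()
tears down the types.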

diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h

@@ -8,7 +8,6 @@ enum cpuhp_state {
 	CPUHP_CREATE_THREADS,
 	CPUHP_PERF_PREPARE,
 	CPUHP_PERF_X86_PREPARE,
-	CPUHP_PERF_X86_UNCORE_PREP,
 	CPUHP_PERF_X86_AMD_UNCORE_PREP,
 	CPUHP_PERF_BFIN,
 	CPUHP_PERF_POWER,
@@ -85,7 +84,6 @@ enum cpuhp_state {
 	CPUHP_AP_IRQ_ARMADA_XP_STARTING,
 	CPUHP_AP_IRQ_BCM2836_STARTING,
 	CPUHP_AP_ARM_MVEBU_COHERENCY,
-	CPUHP_AP_PERF_X86_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
 	CPUHP_AP_PERF_X86_STARTING,
 	CPUHP_AP_PERF_X86_AMD_IBS_STARTING,