// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 * Modifications for ppc64:
 *     Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 */

#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/jump_label.h>
#include <linux/of.h>

#include <asm/cputable.h>
#include <asm/mce.h>
#include <asm/mmu.h>
#include <asm/setup.h>
#include <asm/cpu_setup.h>

static struct cpu_spec the_cpu_spec __read_mostly;

struct cpu_spec* cur_cpu_spec __read_mostly = NULL;
EXPORT_SYMBOL(cur_cpu_spec);

/* The platform string corresponding to the real PVR */
const char *powerpc_base_platform;

#include "cpu_specs.h"
void __init set_cur_cpu_spec(struct cpu_spec *s)
{
	struct cpu_spec *t = &the_cpu_spec;

	t = PTRRELOC(t);
	/*
	 * use memcpy() instead of *t = *s so that GCC replaces it
	 * by __memcpy() when KASAN is active
	 */
	memcpy(t, s, sizeof(*t));

	*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;
}
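
/*
 * Copy the matched spec into the_cpu_spec and run its cpu_setup() hook
 * (on ppc64 and booke). identify_cpu() can be called a second time with a
 * "logical" PVR taken from the device tree (e.g. a POWER6 partition running
 * in POWER5+ compatibility mode); in that case the PMU-related fields
 * established by the earlier call with the real PVR are preserved, because
 * the PMU registers follow the real hardware rather than the logical PVR.
 */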
static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
					       struct cpu_spec *s)
{
	struct cpu_spec *t = &the_cpu_spec;
	struct cpu_spec old;

	t = PTRRELOC(t);
	old = *t;

	/*
	 * Copy everything, then do fixups. Use memcpy() instead of *t = *s
	 * so that GCC replaces it by __memcpy() when KASAN is active
	 */
	memcpy(t, s, sizeof(*t));

	/*
	 * If we are overriding a previous value derived from the real
	 * PVR with a new value obtained using a logical PVR value,
	 * don't modify the performance monitor fields.
	 */
	if (old.num_pmcs && !s->num_pmcs) {
		t->num_pmcs = old.num_pmcs;
		t->pmc_type = old.pmc_type;

		/*
		 * Let's ensure that the fix for the PMAO bug is enabled in
		 * compatibility mode.
		 */
		t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
	}

	*PTRRELOC(&cur_cpu_spec) = &the_cpu_spec;

	/*
	 * Set the base platform string once; assumes
	 * we're called with real pvr first.
	 */
	if (*PTRRELOC(&powerpc_base_platform) == NULL)
		*PTRRELOC(&powerpc_base_platform) = t->platform;

#if defined(CONFIG_PPC64) || defined(CONFIG_BOOKE)
	/* ppc64 and booke expect identify_cpu to also call setup_cpu for
	 * that processor. I will consolidate that at a later time, for now,
	 * just use #ifdef. We also don't need to PTRRELOC the function
	 * pointer on ppc64 and booke as we are running at 0 in real mode
	 * on ppc64 and reloc_offset is always 0 on booke.
	 */
	if (t->cpu_setup) {
		t->cpu_setup(offset, t);
	}
#endif /* CONFIG_PPC64 || CONFIG_BOOKE */

	return t;
}
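
/*
 * Match the given PVR against cpu_specs[] and install the matching entry.
 * Called very early with the real PVR, and possibly again with a logical
 * PVR supplied by the device tree; see setup_cpu_spec() above for how the
 * second call is handled.
 */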
struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;
	int i;

	BUILD_BUG_ON(!ARRAY_SIZE(cpu_specs));

	s = PTRRELOC(s);

	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
		if ((pvr & s->pvr_mask) == s->pvr_value)
			return setup_cpu_spec(offset, s);
	}

	BUG();

	return NULL;
}

/*
 * Used by cpufeatures to get the name for CPUs with a PVR table.
 * If they don't have a PVR table, cpufeatures gets the name from the
 * cpu device-tree node.
 */
void __init identify_cpu_name(unsigned int pvr)
{
	struct cpu_spec *s = cpu_specs;
	struct cpu_spec *t = &the_cpu_spec;
	int i;

	s = PTRRELOC(s);
	t = PTRRELOC(t);

	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
		if ((pvr & s->pvr_mask) == s->pvr_value) {
			t->cpu_name = s->cpu_name;
			return;
		}
	}
}
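
/*
 * cpu_has_feature() and mmu_has_feature() are implemented with static keys
 * so that a feature check in C code, e.g. "if (cpu_has_feature(FOO))", is
 * binary patched into a single nop or branch at boot instead of loading and
 * testing cur_cpu_spec->cpu_features at run time. All keys start out true;
 * once the feature bits are final, the init functions below disable the key
 * for every feature the CPU or MMU does not have.
 */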
#ifdef CONFIG_JUMP_LABEL_FEATURE_CHECKS
struct static_key_true cpu_feature_keys[NUM_CPU_FTR_KEYS] = {
	[0 ... NUM_CPU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
EXPORT_SYMBOL_GPL(cpu_feature_keys);

void __init cpu_feature_keys_init(void)
{
	int i;

	for (i = 0; i < NUM_CPU_FTR_KEYS; i++) {
		unsigned long f = 1ul << i;

		if (!(cur_cpu_spec->cpu_features & f))
			static_branch_disable(&cpu_feature_keys[i]);
	}
}
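
/* MMU feature bits get the same static key treatment as CPU feature bits. */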
struct static_key_true mmu_feature_keys[NUM_MMU_FTR_KEYS] = {
	[0 ... NUM_MMU_FTR_KEYS - 1] = STATIC_KEY_TRUE_INIT
};
EXPORT_SYMBOL(mmu_feature_keys);

void __init mmu_feature_keys_init(void)
{
	int i;

	for (i = 0; i < NUM_MMU_FTR_KEYS; i++) {
		unsigned long f = 1ul << i;

		if (!(cur_cpu_spec->mmu_features & f))
			static_branch_disable(&mmu_feature_keys[i]);
	}
}
#endif