arm64: alternatives: use tpidr_el2 on VHE hosts
Now that KVM uses tpidr_el2 in the same way as Linux's cpu_offset in
tpidr_el1, merge the two. This saves KVM from saving/restoring tpidr_el1
on VHE hosts, and allows future code to blindly access per-cpu variables
without triggering world-switch.

Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <cdall@linaro.org>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent c97e166e54
commit 6d99b68933
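For context: Linux/arm64 keeps each CPU's per-cpu offset in a banked system register (tpidr_el1, or tpidr_el2 once the kernel runs at EL2 under VHE), so a this-cpu access is simply the variable's base address plus the running CPU's offset. Below is a minimal user-space sketch of that addressing scheme; the names are illustrative stand-ins, not kernel API.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

/* one slot of the per-cpu area per CPU; slot i belongs to CPU i */
static long counter_percpu[NR_CPUS];

/* stand-in for tpidr_el1/tpidr_el2: holds the running CPU's offset */
static uintptr_t my_cpu_offset;

/* analogue of set_my_cpu_offset() */
static void set_my_cpu_offset(uintptr_t off)
{
	my_cpu_offset = off;
}

/* analogue of this_cpu_ptr(): base address + per-cpu offset */
static long *this_cpu_counter(void)
{
	return (long *)((uintptr_t)counter_percpu + my_cpu_offset);
}

int main(void)
{
	set_my_cpu_offset(2 * sizeof(long));	/* pretend we are CPU 2 */
	*this_cpu_counter() += 1;
	printf("CPU 2 counter = %ld\n", counter_percpu[2]);	/* prints 1 */
	return 0;
}

Because the offset lives in a register that can be read in one instruction, the accessors patched below (adr_this_cpu, ldr_this_cpu, __my_cpu_offset) only need to know which tpidr register to read; that choice is what this commit makes patchable.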
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -12,6 +12,8 @@
 #include <linux/stddef.h>
 #include <linux/stringify.h>
 
+extern int alternatives_applied;
+
 struct alt_instr {
 	s32 orig_offset;	/* offset to original instruction */
 	s32 alt_offset;		/* offset to replacement instruction */
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -254,7 +254,11 @@ lr	.req	x30		// link register
 #else
 	adr_l	\dst, \sym
 #endif
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	add	\dst, \dst, \tmp
 	.endm
 
@@ -265,7 +269,11 @@ lr	.req	x30		// link register
 	 */
 	.macro	ldr_this_cpu dst, sym, tmp
 	adr_l	\dst, \sym
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	\tmp, tpidr_el1
+alternative_else
+	mrs	\tmp, tpidr_el2
+alternative_endif
 	ldr	\dst, [\dst, \tmp]
 	.endm
 
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,11 +16,15 @@
 #ifndef __ASM_PERCPU_H
 #define __ASM_PERCPU_H
 
+#include <asm/alternative.h>
 #include <asm/stack_pointer.h>
 
 static inline void set_my_cpu_offset(unsigned long off)
 {
-	asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
+	asm volatile(ALTERNATIVE("msr tpidr_el1, %0",
+				 "msr tpidr_el2, %0",
+				 ARM64_HAS_VIRT_HOST_EXTN)
+			:: "r" (off) : "memory");
 }
 
 static inline unsigned long __my_cpu_offset(void)
@@ -31,7 +35,10 @@ static inline unsigned long __my_cpu_offset(void)
 	 * We want to allow caching the value, so avoid using volatile and
 	 * instead use a fake stack read to hazard against barrier().
 	 */
-	asm("mrs %0, tpidr_el1" : "=r" (off) :
+	asm(ALTERNATIVE("mrs %0, tpidr_el1",
+			"mrs %0, tpidr_el2",
+			ARM64_HAS_VIRT_HOST_EXTN)
+		: "=r" (off) :
 		"Q" (*(const unsigned long *)current_stack_pointer));
 
 	return off;
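The two ALTERNATIVE() uses above emit the tpidr_el1 read as the default instruction and record the tpidr_el2 variant in a patch table keyed on ARM64_HAS_VIRT_HOST_EXTN; after feature detection, the boot CPU rewrites each site once, so there is no runtime branch. A rough user-space model of that shape, using function pointers as stand-in instructions (all names here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* the "capability" the patcher keys on, like ARM64_HAS_VIRT_HOST_EXTN */
enum feature { FEAT_VHE, NR_FEATURES };

static int cpu_has[NR_FEATURES];	/* filled in by "feature detection" */

static long read_offset_el1(void) { return 100; }	/* default insn */
static long read_offset_el2(void) { return 200; }	/* replacement insn */

/* one patch site, like struct alt_instr's orig/alt offsets + feature */
struct alt_entry {
	long (**site)(void);		/* location to patch */
	long (*alt)(void);		/* replacement */
	enum feature feature;
};

/* the patchable call site, initially the default sequence */
static long (*read_my_offset)(void) = read_offset_el1;

static struct alt_entry alt_table[] = {
	{ &read_my_offset, read_offset_el2, FEAT_VHE },
};

/* like __apply_alternatives(): run once at boot, after detection */
static void apply_alternatives(void)
{
	for (size_t i = 0; i < sizeof(alt_table) / sizeof(alt_table[0]); i++)
		if (cpu_has[alt_table[i].feature])
			*alt_table[i].site = alt_table[i].alt;
}

int main(void)
{
	cpu_has[FEAT_VHE] = 1;		/* pretend we booted at EL2 */
	apply_alternatives();
	printf("offset = %ld\n", read_my_offset());	/* prints 200 */
	return 0;
}

The real patcher rewrites instruction bytes rather than pointers, but the table-of-{site, replacement, feature} shape is the same one struct alt_instr encodes.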
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -32,6 +32,8 @@
 #define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
 #define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)
 
+int alternatives_applied;
+
 struct alt_region {
 	struct alt_instr	*begin;
 	struct alt_instr	*end;
@@ -143,7 +145,6 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
  */
 static int __apply_alternatives_multi_stop(void *unused)
 {
-	static int patched = 0;
 	struct alt_region region = {
 		.begin	= (struct alt_instr *)__alt_instructions,
 		.end	= (struct alt_instr *)__alt_instructions_end,
@@ -151,14 +152,14 @@ static int __apply_alternatives_multi_stop(void *unused)
 
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
-		while (!READ_ONCE(patched))
+		while (!READ_ONCE(alternatives_applied))
 			cpu_relax();
 		isb();
 	} else {
-		BUG_ON(patched);
+		BUG_ON(alternatives_applied);
 		__apply_alternatives(&region, true);
 		/* Barriers provided by the cache flushing */
-		WRITE_ONCE(patched, 1);
+		WRITE_ONCE(alternatives_applied, 1);
 	}
 
 	return 0;
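The rendezvous above runs with all CPUs parked in stop_machine(): CPU 0 applies the patches and publishes alternatives_applied, while every other CPU spins until the flag is set and then executes isb() to resynchronize its pipeline with the rewritten code. A small pthread sketch of the handshake, with C11 atomics standing in for READ_ONCE()/WRITE_ONCE() (illustrative only; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int alternatives_applied;	/* flag the secondaries spin on */

static void *secondary_cpu(void *arg)
{
	/* the smp_processor_id() != 0 branch: wait for the patcher */
	while (!atomic_load_explicit(&alternatives_applied,
				     memory_order_acquire))
		;	/* cpu_relax() */
	printf("cpu %ld: patched code visible\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t cpus[3];

	for (long i = 1; i <= 3; i++)
		pthread_create(&cpus[i - 1], NULL, secondary_cpu, (void *)i);

	/* the CPU-0 branch: patch, then publish the flag */
	printf("cpu 0: applying alternatives\n");
	atomic_store_explicit(&alternatives_applied, 1, memory_order_release);

	for (int i = 0; i < 3; i++)
		pthread_join(cpus[i], NULL);
	return 0;
}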
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -886,6 +886,22 @@ static int __init parse_kpti(char *str)
 __setup("kpti=", parse_kpti);
 #endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */
 
+static int cpu_copy_el2regs(void *__unused)
+{
+	/*
+	 * Copy register values that aren't redirected by hardware.
+	 *
+	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
+	 * this value to tpidr_el2 before we patch the code. Once we've done
+	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
+	 * do anything here.
+	 */
+	if (!alternatives_applied)
+		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
+
+	return 0;
+}
+
 static const struct arm64_cpu_capabilities arm64_features[] = {
 	{
 		.desc = "GIC system register CPU interface",
@@ -955,6 +971,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.capability = ARM64_HAS_VIRT_HOST_EXTN,
 		.def_scope = SCOPE_SYSTEM,
 		.matches = runs_at_el2,
+		.enable = cpu_copy_el2regs,
 	},
 	{
 		.desc = "32-bit EL0 Support",
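The comment in cpu_copy_el2regs() pins down an ordering requirement: each CPU must mirror tpidr_el1 into tpidr_el2 while the un-patched accessors are still live, so that the offset is already in place the instant patching switches readers over to tpidr_el2; CPUs onlined after patching set tpidr_el2 directly. A toy C model of that ordering (plain variables stand in for the system registers; names are hypothetical):

#include <assert.h>
#include <stdio.h>

static long tpidr_el1_val, tpidr_el2_val;	/* stand-ins for the sysregs */
static int alternatives_applied;

/* like cpu_copy_el2regs(): the .enable callback run on each CPU */
static void cpu_copy_el2regs(void)
{
	/* before patching only tpidr_el1 holds the offset: copy it across */
	if (!alternatives_applied)
		tpidr_el2_val = tpidr_el1_val;
}

int main(void)
{
	tpidr_el1_val = 0x40;		/* per-cpu offset set early in boot */
	cpu_copy_el2regs();		/* enable callback, pre-patching */
	alternatives_applied = 1;	/* accessors now read tpidr_el2 */
	assert(tpidr_el2_val == 0x40);	/* patched readers see the offset */
	printf("tpidr_el2 = %#lx\n", tpidr_el2_val);
	return 0;
}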
|
@ -70,7 +70,11 @@ ENTRY(cpu_do_suspend)
|
|||||||
mrs x8, mdscr_el1
|
mrs x8, mdscr_el1
|
||||||
mrs x9, oslsr_el1
|
mrs x9, oslsr_el1
|
||||||
mrs x10, sctlr_el1
|
mrs x10, sctlr_el1
|
||||||
|
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||||
mrs x11, tpidr_el1
|
mrs x11, tpidr_el1
|
||||||
|
alternative_else
|
||||||
|
mrs x11, tpidr_el2
|
||||||
|
alternative_endif
|
||||||
mrs x12, sp_el0
|
mrs x12, sp_el0
|
||||||
stp x2, x3, [x0]
|
stp x2, x3, [x0]
|
||||||
stp x4, xzr, [x0, #16]
|
stp x4, xzr, [x0, #16]
|
||||||
@@ -116,7 +120,11 @@ ENTRY(cpu_do_resume)
 	msr	mdscr_el1, x10
 
 	msr	sctlr_el1, x12
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	msr	tpidr_el1, x13
+alternative_else
+	msr	tpidr_el2, x13
+alternative_endif
 	msr	sp_el0, x14
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1