2
0
mirror of https://github.com/edk2-porting/linux-next.git synced 2024-12-19 02:34:01 +08:00

arm64: kernel: use x30 for __enable_mmu return address

Using x27 for passing to __enable_mmu what is essentially the return
address makes the code look more complicated than it needs to be. So
switch to x30/lr, and update the secondary and cpu_resume call sites to
simply call __enable_mmu as an ordinary function, with a bl instruction.
This requires the callers to be covered by .idmap.text.

Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Author: Ard Biesheuvel — authored 2016-08-31 12:05:14 +01:00, committed by Will Deacon
parent 3c5e9f238b
commit 9dcf7914ae
2 changed files with 9 additions and 20 deletions

View File

@@ -675,9 +675,9 @@ secondary_startup:
 	 * Common entry point for secondary CPUs.
 	 */
 	bl	__cpu_setup			// initialise processor
-
-	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
-	b	__enable_mmu
+	bl	__enable_mmu
+	ldr	x8, =__secondary_switched
+	br	x8
 ENDPROC(secondary_startup)
 
 __secondary_switched:
@@ -716,9 +716,9 @@ ENDPROC(__secondary_switched)
  * Enable the MMU.
  *
  * x0  = SCTLR_EL1 value for turning on the MMU.
- * x27 = *virtual* address to jump to upon completion
  *
- * Other registers depend on the function called upon completion.
+ * Returns to the caller via x30/lr. This requires the caller to be covered
+ * by the .idmap.text section.
  *
  * Checks if the selected granule size is supported by the CPU.
  * If it isn't, park the CPU
@@ -744,7 +744,7 @@ ENTRY(__enable_mmu)
 	ic	iallu
 	dsb	nsh
 	isb
-	br	x27
+	ret
 ENDPROC(__enable_mmu)
 
 __no_granule_support:
@@ -789,9 +789,7 @@ __primary_switch:
 	mrs	x20, sctlr_el1		// preserve old SCTLR_EL1 value
 #endif
-	adr	x27, 0f
-	b	__enable_mmu
-0:
+	bl	__enable_mmu
 #ifdef CONFIG_RELOCATABLE
 	bl	__relocate_kernel
 #ifdef CONFIG_RANDOMIZE_BASE
@@ -822,8 +820,3 @@ __primary_switch:
 	ldr	x8, =__primary_switched
 	br	x8
 ENDPROC(__primary_switch)
-
-__secondary_switch:
-	ldr	x8, =__secondary_switched
-	br	x8
-ENDPROC(__secondary_switch)

View File

@@ -100,14 +100,10 @@ ENTRY(cpu_resume)
 	bl	el2_setup		// if in EL2 drop to EL1 cleanly
 	bl	__cpu_setup
 	/* enable the MMU early - so we can access sleep_save_stash by va */
-	adr_l	x27, _resume_switched	/* __enable_mmu will branch here */
-	b	__enable_mmu
-ENDPROC(cpu_resume)
-
-_resume_switched:
+	bl	__enable_mmu
 	ldr	x8, =_cpu_resume
 	br	x8
-ENDPROC(_resume_switched)
+ENDPROC(cpu_resume)
 
 	.ltorg
 	.popsection