mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-12 13:34:10 +08:00
463dbba4d1
This fixes a Keystone 2 regression discovered as a side effect of
defining and passing the physical start/end sections of the kernel
to the MMU remapping code.
As the Keystone applies an offset to all physical addresses,
including those identified and patched by phys2virt, we fail to
account for this offset in the kernel_sec_start and kernel_sec_end
variables.
Further, these offsets can extend into the 64bit range on LPAE
systems such as the Keystone 2.
Fix it like this:
- Extend kernel_sec_start and kernel_sec_end to be 64bit
- Add the offset also to kernel_sec_start and kernel_sec_end
As passing kernel_sec_start and kernel_sec_end as 64bit invariably
incurs BE8 endianness issues I have attempted to dry-code around
these.
Tested on the Vexpress QEMU model both with and without LPAE
enabled.
Fixes: 6e121df14c
("ARM: 9090/1: Map the lowmem and kernel separately")
Reported-by: Nishanth Menon <nmenon@kernel.org>
Suggested-by: Russell King <rmk+kernel@armlinux.org.uk>
Tested-by: Grygorii Strashko <grygorii.strashko@ti.com>
Tested-by: Nishanth Menon <nmenon@kernel.org>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
86 lines
1.8 KiB
ArmAsm
86 lines
1.8 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
|
|
* Copyright (C) 2015 Russell King
|
|
*
|
|
* This assembly is required to safely remap the physical address space
|
|
* for Keystone 2
|
|
*/
|
|
#include <linux/linkage.h>
|
|
#include <linux/pgtable.h>
|
|
#include <asm/asm-offsets.h>
|
|
#include <asm/cp15.h>
|
|
#include <asm/memory.h>
|
|
|
|
	/*
	 * Placed in the identity-mapped text section: the code below
	 * disables the MMU while it runs, so it must be reachable at
	 * its physical address.
	 */
	.section ".idmap.text", "ax"

/* Descriptors are 8 bytes (1 << 3) at both level 1 and level 2 */
#define L1_ORDER 3
#define L2_ORDER 3
|
/*
 * lpae_pgtables_remap_asm - add a 64-bit physical offset to the LPAE
 * page table descriptors in place, then retarget TTBR0/TTBR1 by the
 * same offset.
 *
 * In:	r1:r0 = 64-bit offset added to every descriptor touched below
 *	r2    = base of the level 1 table, with the level 2 tables
 *		following at r2 + 0x1000
 *		NOTE(review): inferred from the arithmetic below; confirm
 *		against the C caller.
 *
 * Caches and the MMU are disabled for the duration of the rewrite.
 */
ENTRY(lpae_pgtables_remap_asm)
	stmfd	sp!, {r4-r8, lr}		@ save callee-saved regs + lr

	/*
	 * Turn off caches and the MMU so the tables can be rewritten
	 * safely; r8 preserves the original control register value for
	 * restoration at the end.
	 */
	mrc	p15, 0, r8, c1, c0, 0		@ read control reg
	bic	ip, r8, #CR_M			@ disable caches and MMU
	mcr	p15, 0, ip, c1, c0, 0
	dsb
	isb

	/*
	 * Update level 2 entries covering the kernel: walk the 8-byte
	 * descriptors from the entry for KERNEL_OFFSET up to the one
	 * covering _end - 1, adding the 64-bit offset r1:r0 to each.
	 */
	ldr	r6, =(_end - 1)
	add	r7, r2, #0x1000			@ level 2 tables follow level 1
	add	r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER
	add	r7, r7, #KERNEL_OFFSET >> (SECTION_SHIFT - L2_ORDER)
1:	ldrd	r4, r5, [r7]			@ descriptor low:high = r4:r5
	adds	r4, r4, r0			@ 64-bit add of offset r1:r0
	adc	r5, r5, r1
	strd	r4, r5, [r7], #1 << L2_ORDER	@ store, advance 8 bytes
	cmp	r7, r6
	bls	1b

	/*
	 * Update the two consecutive level 2 entries covering the boot
	 * data (FDT) in the same way.
	 */
	add	r7, r2, #0x1000
	movw	r3, #FDT_FIXED_BASE >> (SECTION_SHIFT - L2_ORDER)
	add	r7, r7, r3
	ldrd	r4, r5, [r7]
	adds	r4, r4, r0			@ 64-bit add of offset r1:r0
	adc	r5, r5, r1
	strd	r4, r5, [r7], #1 << L2_ORDER	@ first entry, advance 8 bytes
	ldrd	r4, r5, [r7]
	adds	r4, r4, r0
	adc	r5, r5, r1
	strd	r4, r5, [r7]			@ second entry

	/* Update all four level 1 entries */
	mov	r6, #4
	mov	r7, r2
2:	ldrd	r4, r5, [r7]
	adds	r4, r4, r0			@ 64-bit add of offset r1:r0
	adc	r5, r5, r1
	strd	r4, r5, [r7], #1 << L1_ORDER	@ store, advance 8 bytes
	subs	r6, r6, #1
	bne	2b

	/* Point TTBR0/TTBR1 at the tables' new physical location */
	mrrc	p15, 0, r4, r5, c2		@ read TTBR0
	adds	r4, r4, r0			@ update physical address
	adc	r5, r5, r1
	mcrr	p15, 0, r4, r5, c2		@ write back TTBR0
	mrrc	p15, 1, r4, r5, c2		@ read TTBR1
	adds	r4, r4, r0			@ update physical address
	adc	r5, r5, r1
	mcrr	p15, 1, r4, r5, c2		@ write back TTBR1

	dsb					@ table/TTBR updates complete

	/* Discard stale cached instructions and translations */
	mov	ip, #0
	mcr	p15, 0, ip, c7, c5, 0		@ I+BTB cache invalidate
	mcr	p15, 0, ip, c8, c7, 0		@ local_flush_tlb_all()
	dsb
	isb

	mcr	p15, 0, r8, c1, c0, 0		@ re-enable MMU (restore r8)
	dsb
	isb

	ldmfd	sp!, {r4-r8, pc}		@ restore regs and return
ENDPROC(lpae_pgtables_remap_asm)