Mirror of https://github.com/edk2-porting/linux-next.git (synced 2024-12-19 18:53:52 +08:00)
ARM: decompressor: avoid speculative prefetch from non-RAM areas
We set up identity MMU mappings across the entire 4GB of address space, and these mappings are effectively permissionless because the domain is set to manager. This unfortunately allows ARMv6 and later CPUs to speculatively prefetch from the entire address space, which can cause undesirable side effects if those regions contain devices.

As we set up the mappings with read/write permission, we can switch the domain to client mode, and then use the XN bit on ARMv6 and above to prevent speculative prefetch from non-RAM areas.

Reported-by: R Sricharan <r.sricharan@ti.com>
Acked-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
This commit is contained in:
parent bd0493eaaf
commit 1fdc08abfa
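The magic numbers in the diff below come straight from the ARM short-descriptor (1MB section) page table format. As a rough orientation only, the following C sketch spells out which bits 0x12, 3 << 10, CB_BITS and 0x10 select; the helper names are invented for illustration and this is not code from the kernel tree.

#include <stdint.h>

/* Illustrative sketch of a 1MB section descriptor as used by the
 * decompressor's identity map (short-descriptor format).
 * Helper names are invented; only the bit positions matter here. */
#define SECTION_TYPE   0x02u        /* bits [1:0] = 0b10: section entry        */
#define SECTION_XN     (1u << 4)    /* bit 4: XN on ARMv6+ ("U" on earlier)    */
#define SECTION_B      (1u << 2)    /* bufferable                              */
#define SECTION_C      (1u << 3)    /* cacheable                               */
#define SECTION_AP_RW  (3u << 10)   /* AP = 0b11: full read/write access       */

/* RAM sections: cacheable (plus bufferable unless write-through, i.e.
 * CB_BITS) and executable (XN clear), so code can be fetched from them. */
static inline uint32_t ram_section(uint32_t base_mb)
{
    return (base_mb & 0xfff00000u) | SECTION_AP_RW
         | SECTION_C | SECTION_B | SECTION_TYPE;
}

/* Non-RAM sections: XN set, C/B clear, so the core can never issue a
 * speculative prefetch against device space even though the mapping
 * stays writable. */
static inline uint32_t nonram_section(uint32_t base_mb)
{
    return (base_mb & 0xfff00000u) | SECTION_AP_RW
         | SECTION_XN | SECTION_TYPE;
}

With the domain switched from manager to client (the DACR change further down), these AP and XN bits are actually enforced; under a manager domain they are ignored, which is what allowed the speculative prefetches in the first place.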
@@ -567,6 +567,12 @@ __armv3_mpu_cache_on:
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3
 		mov	pc, lr
 
+#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
+#define CB_BITS 0x08
+#else
+#define CB_BITS 0x0c
+#endif
+
 __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		bic	r3, r3, #0xff		@ Align the pointer
 		bic	r3, r3, #0x3f00
@@ -578,17 +584,14 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
 		mov	r9, r0, lsr #18
 		mov	r9, r9, lsl #18		@ start of RAM
 		add	r10, r9, #0x10000000	@ a reasonable RAM size
-		mov	r1, #0x12
-		orr	r1, r1, #3 << 10
+		mov	r1, #0x12		@ XN|U + section mapping
+		orr	r1, r1, #3 << 10	@ AP=11
 		add	r2, r3, #16384
 1:		cmp	r1, r9			@ if virt > start of RAM
-#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
-		orrhs	r1, r1, #0x08		@ set cacheable
-#else
-		orrhs	r1, r1, #0x0c		@ set cacheable, bufferable
-#endif
-		cmp	r1, r10			@ if virt > end of RAM
-		bichs	r1, r1, #0x0c		@ clear cacheable, bufferable
+		cmphs	r10, r1			@   && end of RAM > virt
+		bic	r1, r1, #0x1c		@ clear XN|U + C + B
+		orrlo	r1, r1, #0x10		@ Set XN|U for non-RAM
+		orrhs	r1, r1, r6		@ set RAM section settings
 		str	r1, [r0], #4		@ 1:1 mapping
 		add	r1, r1, #1048576
 		teq	r0, r2
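For orientation, the rewritten __setup_mmu loop above behaves roughly like the C sketch below, one iteration per 1MB section of the 4GB identity map. The names pgdir, ram_start, ram_end and ram_bits are invented stand-ins for r0/r3, r9, r10 and r6, and this is an approximation rather than a literal translation of the assembly.

#include <stdint.h>

/* Approximate C rendering of the __setup_mmu loop after this change.
 * ram_bits is the per-CPU RAM section attribute word (r6) that each
 * cache_on routine now sets up before calling __setup_mmu. */
static void setup_identity_map(uint32_t *pgdir, uint32_t ram_start,
                               uint32_t ram_end, uint32_t ram_bits)
{
    /* r1 starts as XN|U + section type with AP=11 and also carries the
     * running section address in its top bits. */
    uint32_t entry = 0x12u | (3u << 10);
    unsigned int i;

    for (i = 0; i < 4096; i++) {
        entry &= ~0x1cu;                     /* clear XN|U + C + B          */
        if (entry >= ram_start && entry <= ram_end)
            entry |= ram_bits;               /* RAM: cacheable, executable  */
        else
            entry |= 0x10u;                  /* non-RAM: XN|U               */
        pgdir[i] = entry;                    /* 1:1 section mapping         */
        entry += 0x100000u;                  /* advance by 1MB              */
    }
}

The point of the rewrite is that the cacheable/bufferable choice is no longer made inside the loop by #ifdef, but passed in via r6, and that every section outside RAM now gets XN so ARMv6 and later cores cannot speculatively prefetch from it.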
@@ -599,7 +602,7 @@ __setup_mmu:	sub	r3, r4, #16384		@ Page directory size
  * so there is no map overlap problem for up to 1 MB compressed kernel.
  * If the execution is in RAM then we would only be duplicating the above.
  */
-		mov	r1, #0x1e
+		orr	r1, r6, #0x04		@ ensure B is set for this
 		orr	r1, r1, #3 << 10
 		mov	r2, pc
 		mov	r2, r2, lsr #20
@@ -620,6 +623,7 @@ __arm926ejs_mmu_cache_on:
 __armv4_mmu_cache_on:
 		mov	r12, lr
 #ifdef CONFIG_MMU
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -641,6 +645,7 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_MMU
 		mrc	p15, 0, r11, c0, c1, 4	@ read ID_MMFR0
 		tst	r11, #0xf		@ VMSA
+		movne	r6, #CB_BITS | 0x02	@ !XN
 		blne	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c10, 4	@ drain write buffer
@@ -655,7 +660,7 @@ __armv7_mmu_cache_on:
 		orr	r0, r0, #1 << 25	@ big-endian page tables
 #endif
 		orrne	r0, r0, #1		@ MMU enabled
-		movne	r1, #-1
+		movne	r1, #0xfffffffd		@ domain 0 = client
 		mcrne	p15, 0, r3, c2, c0, 0	@ load page table pointer
 		mcrne	p15, 0, r1, c3, c0, 0	@ load domain access control
#endif
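The movne r1, #0xfffffffd line above is where the domain is switched from manager to client. The DACR holds a 2-bit field per domain (0b11 = manager, permissions ignored; 0b01 = client, permissions enforced), and the decompressor's mappings all use domain 0. A small sketch of the arithmetic, purely for illustration:

#include <stdint.h>

/* Sketch: derive the new DACR value. Previously all 16 domains were
 * manager (0xffffffff, i.e. #-1); now domain 0 becomes a client so the
 * AP and XN bits in its descriptors are actually enforced. */
static inline uint32_t dacr_domain0_client(void)
{
    uint32_t dacr = 0xffffffffu;   /* old value: every domain manager   */
    dacr &= ~3u;                   /* clear domain 0's 2-bit field       */
    dacr |= 1u;                    /* domain 0 = 0b01 (client)           */
    return dacr;                   /* 0xfffffffd, as loaded into r1      */
}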
@@ -668,6 +673,7 @@ __armv7_mmu_cache_on:
 
 __fa526_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c7, 0	@ Invalidate whole cache
@@ -682,6 +688,7 @@ __fa526_cache_on:
 
 __arm6_mmu_cache_on:
 		mov	r12, lr
+		mov	r6, #CB_BITS | 0x12	@ U
 		bl	__setup_mmu
 		mov	r0, #0
 		mcr	p15, 0, r0, c7, c0, 0	@ invalidate whole cache v3