powerpc/fsl_booke/32: implement KASLR infrastructure

This patch adds support for booting the kernel from places other than
KERNELBASE. Since CONFIG_RELOCATABLE is already supported, all we need
to do is map or copy the kernel to a proper place and relocate.
Freescale Book-E parts expect lowmem to be mapped by fixed TLB entries
(TLB1). The TLB1 entries are not suitable for mapping the kernel
directly in a randomized region, so we choose to copy the kernel to a
proper place and restart to relocate.

The offset of the kernel is not randomized yet (a fixed 64M offset is
used). We will randomize it in the next patch.
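
For orientation, the control flow this patch sets up can be sketched as
follows (a simplified reading of relocate_init() and kaslr_early_init()
from the diff below, not a verbatim excerpt):

        /*
         * First boot pass (running at KERNELBASE):
         *   relocate_init()
         *     -> kaslr_early_init()
         *          - choose an offset (fixed 64M in this patch)
         *          - if the target is outside the current 64M boot
         *            mapping, create a TLB1 entry for it
         *          - memcpy() the kernel image, flush the icache
         *          - reloc_kernel_entry(): restart at the new address
         *
         * Second boot pass (running at the randomized address):
         *   relocate_init() sees is_second_reloc and performs the
         *   normal CONFIG_RELOCATABLE fixup against the new base.
         */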

Signed-off-by: Jason Yan <yanaijie@huawei.com>
Tested-by: Diana Craciun <diana.craciun@nxp.com>
Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
[mpe: Use PTRRELOC() in early_init()]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Author: Jason Yan <yanaijie@huawei.com>, 2019-09-20 17:45:40 +08:00
Committer: Michael Ellerman <mpe@ellerman.id.au>
parent c061b38a3e
commit 2b0e86cc5d
9 changed files with 109 additions and 17 deletions

arch/powerpc/Kconfig

@@ -551,6 +551,17 @@ config RELOCATABLE
 	  setting can still be useful to bootwrappers that need to know the
 	  load address of the kernel (eg. u-boot/mkimage).
 
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	depends on (FSL_BOOKE && FLATMEM && PPC32)
+	depends on RELOCATABLE
+	help
+	  Randomizes the virtual address at which the kernel image is
+	  loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
+	  If unsure, say Y.
+
 config RELOCATABLE_TEST
 	bool "Test relocatable kernel"
 	depends on (PPC64 && RELOCATABLE)
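
As a usage note, a plausible .config fragment for exercising this on a
32-bit FSL Book-E target would be the following (the platform symbols
are an assumption; only the last two come from this hunk):

        CONFIG_PPC32=y
        CONFIG_FSL_BOOKE=y
        CONFIG_FLATMEM=y
        CONFIG_RELOCATABLE=y
        CONFIG_RANDOMIZE_BASE=y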

arch/powerpc/include/asm/nohash/mmu-book3e.h

@@ -75,7 +75,6 @@
 #define MAS2_E			0x00000001
 #define MAS2_WIMGE_MASK		0x0000001f
 #define MAS2_EPN_MASK(size)	(~0 << (size + 10))
-#define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN		0xFFFFF000
 #define MAS3_U0			0x00000200
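
The removed macro only masked the effective page number and OR'd in the
attribute flags; since '&' binds tighter than '|' in C, it was correct
even without inner parentheses. Its sole remaining user (the boot entry
mapping below) now has to build the MAS2 value at runtime, because the
base address is no longer a compile-time constant. As a C illustration
of what the macro computed:

        /* Equivalent of the removed MAS2_VAL(addr, size, flags) */
        static inline u32 mas2_val(unsigned long addr, int size, u32 flags)
        {
                return (addr & MAS2_EPN_MASK(size)) | flags;
        }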

arch/powerpc/kernel/early_32.c

@@ -19,9 +19,12 @@
  */
 notrace unsigned long __init early_init(unsigned long dt_ptr)
 {
-	unsigned long offset = reloc_offset();
+	unsigned long kva, offset = reloc_offset();
+
+	kva = *PTRRELOC(&kernstart_virt_addr);
 
 	/* First zero the BSS */
-	memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
+	if (kva == KERNELBASE)
+		memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);
 
 	/*
@@ -32,5 +35,5 @@ notrace unsigned long __init early_init(unsigned long dt_ptr)
 	apply_feature_fixups();
 
-	return KERNELBASE + offset;
+	return kva + offset;
 }
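
Two details are worth calling out here. PTRRELOC() adjusts a pointer by
the current relocation offset, which matters because early_init() runs
before the kernel has been relocated to its link address; that is the
fixup recorded in the [mpe: ...] tag above. Roughly (the actual
definition lives in arch/powerpc/include/asm/setup.h):

        #define PTRRELOC(x)	((typeof(x)) add_reloc_offset((unsigned long)(x)))

And the BSS is now zeroed only when the kernel still runs at KERNELBASE:
on the second, post-KASLR pass the copied image (which spans _stext to
_end, BSS included) already carries state set during the first pass, and
zeroing it again would wipe that out.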

arch/powerpc/kernel/fsl_booke_entry_mapping.S

@@ -155,23 +155,22 @@ skpinv:	addi	r6,r6,1		/* Increment */
 
 #if defined(ENTRY_MAPPING_BOOT_SETUP)
 
-/* 6. Setup KERNELBASE mapping in TLB1[0] */
+/* 6. Setup kernstart_virt_addr mapping in TLB1[0] */
 	lis	r6,0x1000		/* Set MAS0(TLBSEL) = TLB1(1), ESEL = 0 */
 	mtspr	SPRN_MAS0,r6
 	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
 	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_64M))@l
 	mtspr	SPRN_MAS1,r6
-	lis	r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, MAS2_M_IF_NEEDED)@h
-	ori	r6,r6,MAS2_VAL(PAGE_OFFSET, BOOK3E_PAGESZ_64M, MAS2_M_IF_NEEDED)@l
+	lis	r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@h
+	ori	r6,r6,MAS2_EPN_MASK(BOOK3E_PAGESZ_64M)@l
+	and	r6,r6,r20
+	ori	r6,r6,MAS2_M_IF_NEEDED@l
 	mtspr	SPRN_MAS2,r6
 	mtspr	SPRN_MAS3,r8
 	tlbwe
 
-/* 7. Jump to KERNELBASE mapping */
-	lis	r6,(KERNELBASE & ~0xfff)@h
-	ori	r6,r6,(KERNELBASE & ~0xfff)@l
-	rlwinm	r7,r25,0,0x03ffffff
-	add	r6,r7,r6
+/* 7. Jump to kernstart_virt_addr mapping */
+	mr	r6,r20
 
 #elif defined(ENTRY_MAPPING_KEXEC_SETUP)
 /*
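
In C terms, the rewritten steps 6 and 7 do the following; r20 carries
kernstart_virt_addr, loaded by the caller in head_fsl_booke.S (next
file). This is an illustration of the register logic, not kernel code:

        /* Step 6: compose SPRN_MAS2 from the runtime base in r20 */
        u32 mas2 = (kernstart_virt_addr & MAS2_EPN_MASK(BOOK3E_PAGESZ_64M))
                   | MAS2_M_IF_NEEDED;
        /* Step 7: the jump target is now simply the runtime base */
        unsigned long jump_target = kernstart_virt_addr;	/* mr r6,r20 */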

arch/powerpc/kernel/head_fsl_booke.S

@@ -155,6 +155,8 @@ _ENTRY(_start);
  */
 _ENTRY(__early_start)
+	LOAD_REG_ADDR_PIC(r20, kernstart_virt_addr)
+	lwz	r20,0(r20)
 
 #define ENTRY_MAPPING_BOOT_SETUP
 #include "fsl_booke_entry_mapping.S"
@@ -277,8 +279,8 @@ set_ivor:
 	ori	r6, r6, swapper_pg_dir@l
 	lis	r5, abatron_pteptrs@h
 	ori	r5, r5, abatron_pteptrs@l
-	lis	r4, KERNELBASE@h
-	ori	r4, r4, KERNELBASE@l
+	lis	r3, kernstart_virt_addr@ha
+	lwz	r4, kernstart_virt_addr@l(r3)
 	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
 	stw	r6, 0(r5)
@@ -1067,7 +1069,12 @@ __secondary_start:
 	mr	r5,r25		/* phys kernel start */
 	rlwinm	r5,r5,0,~0x3ffffff	/* aligned 64M */
 	subf	r4,r5,r4	/* memstart_addr - phys kernel start */
-	li	r5,0		/* no device tree */
+	lis	r7,KERNELBASE@h
+	ori	r7,r7,KERNELBASE@l
+	cmpw	r20,r7		/* if kernstart_virt_addr != KERNELBASE, randomized */
+	beq	2f
+	li	r4,0
+2:	li	r5,0		/* no device tree */
 	li	r6,0		/* not boot cpu */
 	bl	restore_to_as0
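
The __secondary_start change is easier to read in C. r20 still holds
kernstart_virt_addr from __early_start, and the offset handed to
restore_to_as0() is forced to zero when the base was randomized (a
sketch of the register logic under that reading; r3 already holds the
TLB entry index):

        offset = memstart_addr - (phys_kernel_start & ~0x3ffffffUL);
        if (kernstart_virt_addr != KERNELBASE)	/* randomized */
                offset = 0;
        restore_to_as0(esel, offset, NULL /* no device tree */, 0 /* not boot cpu */);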

arch/powerpc/mm/mmu_decl.h

@@ -141,10 +141,17 @@ extern int switch_to_as1(void);
 extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
 void create_kaslr_tlb_entry(int entry, unsigned long virt, phys_addr_t phys);
 void reloc_kernel_entry(void *fdt, int addr);
+extern int is_second_reloc;
 #endif
 extern void loadcam_entry(unsigned int index);
 extern void loadcam_multi(int first_idx, int num, int tmp_idx);
 
+#ifdef CONFIG_RANDOMIZE_BASE
+void kaslr_early_init(void *dt_ptr, phys_addr_t size);
+#else
+static inline void kaslr_early_init(void *dt_ptr, phys_addr_t size) {}
+#endif
+
 struct tlbcam {
 	u32	MAS0;
 	u32	MAS1;

arch/powerpc/mm/nohash/Makefile

@@ -8,6 +8,7 @@ obj-$(CONFIG_40x)		+= 40x.o
 obj-$(CONFIG_44x)		+= 44x.o
 obj-$(CONFIG_PPC_8xx)		+= 8xx.o
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= fsl_booke.o
+obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr_booke.o
 ifdef CONFIG_HUGETLB_PAGE
 obj-$(CONFIG_PPC_FSL_BOOK3E)	+= book3e_hugetlbpage.o
 endif

arch/powerpc/mm/nohash/fsl_booke.c

@@ -263,7 +263,8 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 int __initdata is_second_reloc;
 notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 {
-	unsigned long base = KERNELBASE;
+	unsigned long base = kernstart_virt_addr;
+	phys_addr_t size;
 
 	kernstart_addr = start;
 	if (is_second_reloc) {
@@ -291,7 +292,7 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 	start &= ~0x3ffffff;
 	base &= ~0x3ffffff;
 	virt_phys_offset = base - start;
-	early_get_first_memblock_info(__va(dt_ptr), NULL);
+	early_get_first_memblock_info(__va(dt_ptr), &size);
 
 	/*
 	 * We now get the memstart_addr, then we should check if this
 	 * address is the same as what the PAGE_OFFSET map to now. If
@@ -316,6 +317,8 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
 		/* We should never reach here */
 		panic("Relocation error");
 	}
+
+	kaslr_early_init(__va(dt_ptr), size);
 }
 #endif
 #endif
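
A worked example of the first pass, with assumed addresses: suppose the
bootloader loads the kernel at physical 0x01000000 and
kernstart_virt_addr is still KERNELBASE (typically 0xc0000000 on these
32-bit parts). Then:

        base  = 0xc0000000;	/* kernstart_virt_addr */
        start = 0x01000000;	/* phys load address   */
        base  &= ~0x3ffffff;	/* 0xc0000000, 64M-aligned */
        start &= ~0x3ffffff;	/* 0x00000000, 64M-aligned */
        /* virt_phys_offset = base - start = 0xc0000000 */

On the second pass kernstart_virt_addr already includes the KASLR
offset, so the same arithmetic produces the randomized mapping, while
kaslr_early_init() (a no-op stub without CONFIG_RANDOMIZE_BASE) only
runs at the end of the first pass.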

arch/powerpc/mm/nohash/kaslr_booke.c (new file)

@@ -0,0 +1,62 @@
+// SPDX-License-Identifier: GPL-2.0-only
+//
+// Copyright (C) 2019 Jason Yan <yanaijie@huawei.com>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/swap.h>
+#include <linux/stddef.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/memblock.h>
+#include <asm/pgalloc.h>
+#include <asm/prom.h>
+#include <mm/mmu_decl.h>
+
+static unsigned long __init kaslr_choose_location(void *dt_ptr, phys_addr_t size,
+						  unsigned long kernel_sz)
+{
+	/* return a fixed offset of 64M for now */
+	return SZ_64M;
+}
+
+/*
+ * To see if we need to relocate the kernel to a random offset
+ * void *dt_ptr - address of the device tree
+ * phys_addr_t size - size of the first memory block
+ */
+notrace void __init kaslr_early_init(void *dt_ptr, phys_addr_t size)
+{
+	unsigned long tlb_virt;
+	phys_addr_t tlb_phys;
+	unsigned long offset;
+	unsigned long kernel_sz;
+
+	kernel_sz = (unsigned long)_end - (unsigned long)_stext;
+
+	offset = kaslr_choose_location(dt_ptr, size, kernel_sz);
+
+	if (offset == 0)
+		return;
+
+	kernstart_virt_addr += offset;
+	kernstart_addr += offset;
+
+	is_second_reloc = 1;
+
+	if (offset >= SZ_64M) {
+		tlb_virt = round_down(kernstart_virt_addr, SZ_64M);
+		tlb_phys = round_down(kernstart_addr, SZ_64M);
+
+		/* Create kernel map to relocate in */
+		create_kaslr_tlb_entry(1, tlb_virt, tlb_phys);
+	}
+
+	/* Copy the kernel to its new location and run */
+	memcpy((void *)kernstart_virt_addr, (void *)_stext, kernel_sz);
+	flush_icache_range(kernstart_virt_addr, kernstart_virt_addr + kernel_sz);
+
+	reloc_kernel_entry(dt_ptr, kernstart_virt_addr);
+}
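
One subtlety above: a new TLB1 entry is needed only when the copy
destination falls outside the 64M region that the current boot mapping
already covers, hence the offset >= SZ_64M test. With this patch's
fixed offset the numbers work out as follows (KERNELBASE = 0xc0000000
assumed, the typical 32-bit value):

        /* offset = SZ_64M, so: */
        kernstart_virt_addr = 0xc0000000 + SZ_64M;	/* 0xc4000000 */
        tlb_virt = round_down(0xc4000000, SZ_64M);	/* 0xc4000000 */
        /* -> one fresh 64M TLB1 entry covers the whole destination */

After the memcpy() and icache flush, reloc_kernel_entry() restarts the
early boot path at the new address; that path re-enters relocate_init(),
which now takes the is_second_reloc branch.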