
Merge branch 'riscv-wx-mappings' into for-next

This contains both the short-term fix for the W+X boot mappings and the
larger cleanup.

* riscv-wx-mappings:
  riscv: Map the kernel with correct permissions the first time
  riscv: Introduce set_kernel_memory helper
  riscv: Simplify xip and !xip kernel address conversion macros
  riscv: Remove CONFIG_PHYS_RAM_BASE_FIXED
  riscv: mm: Fix W+X mappings at boot
Commit: 01112e5e20
Author: Palmer Dabbelt
Date:   2021-06-30 21:50:32 -07:00
GPG Key ID: 2E1319F35FBB1889 (no known key found for this signature in database)

7 changed files with 102 additions and 94 deletions

arch/riscv/Kconfig

@@ -494,13 +494,8 @@ config STACKPROTECTOR_PER_TASK
 	def_bool y
 	depends on STACKPROTECTOR && CC_HAVE_STACKPROTECTOR_TLS
 
-config PHYS_RAM_BASE_FIXED
-	bool "Explicitly specified physical RAM address"
-	default n
-
 config PHYS_RAM_BASE
 	hex "Platform Physical RAM address"
-	depends on PHYS_RAM_BASE_FIXED
 	default "0x80000000"
 	help
 	  This is the physical address of RAM in the system. It has to be
@@ -513,7 +508,6 @@ config XIP_KERNEL
 	# This prevents XIP from being enabled by all{yes,mod}config, which
 	# fail to build since XIP doesn't support large kernels.
 	depends on !COMPILE_TEST
-	select PHYS_RAM_BASE_FIXED
 	help
 	  Execute-In-Place allows the kernel to run from non-volatile storage
 	  directly addressable by the CPU, such as NOR flash. This saves RAM

arch/riscv/include/asm/page.h

@@ -83,55 +83,58 @@ extern unsigned long va_pa_offset;
 #ifdef CONFIG_64BIT
 extern unsigned long va_kernel_pa_offset;
 #endif
-#ifdef CONFIG_XIP_KERNEL
 extern unsigned long va_kernel_xip_pa_offset;
-#endif
 extern unsigned long pfn_base;
+extern uintptr_t load_sz;
 #define ARCH_PFN_OFFSET		(pfn_base)
 #else
 #define va_pa_offset		0
 #ifdef CONFIG_64BIT
 #define va_kernel_pa_offset	0
 #endif
+#define va_kernel_xip_pa_offset 0
 #define ARCH_PFN_OFFSET		(PAGE_OFFSET >> PAGE_SHIFT)
 #endif /* CONFIG_MMU */
 
 extern unsigned long kernel_virt_addr;
 
 #ifdef CONFIG_64BIT
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET && (x) < kernel_virt_addr)
+
 #define linear_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_pa_offset))
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_pa_to_va(y)	({						\
 	unsigned long _y = y;								\
 	(_y >= CONFIG_PHYS_RAM_BASE) ?							\
 		(void *)((unsigned long)(_y) + va_kernel_pa_offset + XIP_OFFSET) :	\
 		(void *)((unsigned long)(_y) + va_kernel_xip_pa_offset);		\
 	})
-#else
-#define kernel_mapping_pa_to_va(x)	((void *)((unsigned long)(x) + va_kernel_pa_offset))
-#endif
 #define __pa_to_va_nodebug(x)		linear_mapping_pa_to_va(x)
 
 #define linear_mapping_va_to_pa(x)	((unsigned long)(x) - va_pa_offset)
-#ifdef CONFIG_XIP_KERNEL
 #define kernel_mapping_va_to_pa(y) ({						\
 	unsigned long _y = y;							\
 	(_y < kernel_virt_addr + XIP_OFFSET) ?					\
 		((unsigned long)(_y) - va_kernel_xip_pa_offset) :		\
 		((unsigned long)(_y) - va_kernel_pa_offset - XIP_OFFSET);	\
 	})
-#else
-#define kernel_mapping_va_to_pa(x)	((unsigned long)(x) - va_kernel_pa_offset)
-#endif
+
 #define __va_to_pa_nodebug(x)	({						\
 	unsigned long _x = x;							\
-	(_x < kernel_virt_addr) ?						\
+	is_linear_mapping(_x) ?							\
 		linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x);	\
 	})
 #else
+#define is_kernel_mapping(x)	\
+	((x) >= kernel_virt_addr && (x) < (kernel_virt_addr + load_sz))
+#define is_linear_mapping(x)	\
+	((x) >= PAGE_OFFSET)
+
 #define __pa_to_va_nodebug(x)  ((void *)((unsigned long) (x) + va_pa_offset))
 #define __va_to_pa_nodebug(x)  ((unsigned long)(x) - va_pa_offset)
-#endif
+#endif /* CONFIG_64BIT */
 
 #ifdef CONFIG_DEBUG_VIRTUAL
 extern phys_addr_t __virt_to_phys(unsigned long x);
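
The page.h changes replace the bare `_x < kernel_virt_addr` test with named region predicates, so each translation path states which mapping it expects. Below is a minimal userspace sketch of that 64-bit dispatch; the bases, offsets, and size are made-up stand-ins for PAGE_OFFSET, kernel_virt_addr, and friends (not values from this series), and the XIP case is left out:

/* Toy model of the 64-bit __va_to_pa_nodebug() dispatch; all constants
 * below are made-up stand-ins, not real platform values. */
#include <stdio.h>

#define PAGE_OFFSET		0xffffffe000000000UL	/* linear map base (stand-in) */
#define KERNEL_VIRT_ADDR	0xffffffff80000000UL	/* kernel map base (stand-in) */
#define LOAD_SZ			0x01000000UL		/* kernel image size (stand-in) */
#define VA_PA_OFFSET		(PAGE_OFFSET - 0x80000000UL)
#define VA_KERNEL_PA_OFFSET	(KERNEL_VIRT_ADDR - 0x80200000UL)

#define is_linear_mapping(x)	((x) >= PAGE_OFFSET && (x) < KERNEL_VIRT_ADDR)
#define is_kernel_mapping(x)	\
	((x) >= KERNEL_VIRT_ADDR && (x) < (KERNEL_VIRT_ADDR + LOAD_SZ))

static unsigned long va_to_pa(unsigned long va)
{
	/* Same shape as __va_to_pa_nodebug(): choose the offset by region. */
	return is_linear_mapping(va) ? va - VA_PA_OFFSET
				     : va - VA_KERNEL_PA_OFFSET;
}

int main(void)
{
	unsigned long lin = PAGE_OFFSET + 0x1000;
	unsigned long krn = KERNEL_VIRT_ADDR + 0x1000;

	printf("linear %#lx -> %#lx (is_linear=%d)\n", lin, va_to_pa(lin),
	       !!is_linear_mapping(lin));
	printf("kernel %#lx -> %#lx (is_kernel=%d)\n", krn, va_to_pa(krn),
	       !!is_kernel_mapping(krn));
	return 0;
}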

arch/riscv/include/asm/pgtable.h

@@ -77,6 +77,8 @@
 
 #ifdef CONFIG_XIP_KERNEL
 #define XIP_OFFSET		SZ_8M
+#else
+#define XIP_OFFSET		0
 #endif
 
 #ifndef __ASSEMBLY__

arch/riscv/include/asm/sections.h

@@ -6,6 +6,7 @@
 #define __ASM_SECTIONS_H
 
 #include <asm-generic/sections.h>
+#include <linux/mm.h>
 
 extern char _start[];
 extern char _start_kernel[];
@@ -13,4 +14,20 @@ extern char __init_data_begin[], __init_data_end[];
 extern char __init_text_begin[], __init_text_end[];
 extern char __alt_start[], __alt_end[];
 
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)_start;
+	uintptr_t end = (uintptr_t)__init_data_begin;
+
+	return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+	uintptr_t start = (uintptr_t)lm_alias(_start);
+	uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+	return va >= start && va < end;
+}
+
 #endif /* __ASM_SECTIONS_H */

arch/riscv/include/asm/set_memory.h

@@ -16,20 +16,28 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 int set_memory_rw_nx(unsigned long addr, int numpages);
-void protect_kernel_text_data(void);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+					     int (*set_memory)(unsigned long start,
+							       int num_pages))
+{
+	unsigned long start = (unsigned long)startp;
+	unsigned long end = (unsigned long)endp;
+	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+	return set_memory(start, num_pages);
+}
 #else
 static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
 static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
-static inline void protect_kernel_text_data(void) {}
 static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
-#endif
-
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void);
-#else
-static inline void protect_kernel_linear_mapping_text_rodata(void) {}
+static inline int set_kernel_memory(char *startp, char *endp,
+				    int (*set_memory)(unsigned long start,
+						      int num_pages))
+{
+	return 0;
+}
 #endif
 
 int set_direct_map_invalid_noflush(struct page *page);
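
The new helper folds the open-coded "compute page count, shift, call setter" pattern into one call; the later hunks in this merge invoke it as set_kernel_memory(__start_rodata, _data, set_memory_ro). Here is a self-contained userspace model of its behavior, where PAGE_SHIFT, the fake section array, and the printing set_memory_ro() are stand-ins for the kernel definitions:

/* Standalone model of set_kernel_memory(); the section array and the
 * printing set_memory_ro() are stand-ins, not kernel code. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static int set_memory_ro(unsigned long start, int num_pages)
{
	printf("set_memory_ro(%#lx, %d pages)\n", start, num_pages);
	return 0;
}

/* Same shape as the helper above: bounds in, page count out, setter applied. */
static int set_kernel_memory(char *startp, char *endp,
			     int (*set_memory)(unsigned long start, int num_pages))
{
	unsigned long start = (unsigned long)startp;
	unsigned long end = (unsigned long)endp;
	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;

	return set_memory(start, num_pages);
}

static char fake_rodata[3 * PAGE_SIZE];	/* stand-in for a linker section */

int main(void)
{
	/* Mirrors the new mark_rodata_ro(): whole section, no manual shifts. */
	return set_kernel_memory(fake_rodata, fake_rodata + sizeof(fake_rodata),
				 set_memory_ro);
}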

arch/riscv/kernel/setup.c

@@ -289,11 +289,6 @@ void __init setup_arch(char **cmdline_p)
 	init_resources();
 	sbi_init();
 
-	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) {
-		protect_kernel_text_data();
-		protect_kernel_linear_mapping_text_rodata();
-	}
-
 #ifdef CONFIG_KASAN
 	kasan_init();
 #endif
@@ -328,11 +323,10 @@ subsys_initcall(topology_init);
 
 void free_initmem(void)
 {
-	unsigned long init_begin = (unsigned long)__init_begin;
-	unsigned long init_end = (unsigned long)__init_end;
-
 	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
-		set_memory_rw_nx(init_begin, (init_end - init_begin) >> PAGE_SHIFT);
+		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
+				  IS_ENABLED(CONFIG_64BIT) ?
+					set_memory_rw : set_memory_rw_nx);
 
 	free_initmem_default(POISON_FREE_INITMEM);
 }

arch/riscv/mm/init.c

@@ -455,6 +455,43 @@ asmlinkage void __init __copy_data(void)
 }
 #endif
 
+#ifdef CONFIG_STRICT_KERNEL_RWX
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (is_va_kernel_text(va))
+		return PAGE_KERNEL_READ_EXEC;
+
+	/*
+	 * In 64-bit kernel, the kernel mapping is outside the linear mapping so
+	 * we must protect its linear mapping alias from being executed and
+	 * written.
+	 * And rodata section is marked readonly in mark_rodata_ro.
+	 */
+	if (IS_ENABLED(CONFIG_64BIT) && is_va_kernel_lm_alias_text(va))
+		return PAGE_KERNEL_READ;
+
+	return PAGE_KERNEL;
+}
+
+void mark_rodata_ro(void)
+{
+	set_kernel_memory(__start_rodata, _data, set_memory_ro);
+	if (IS_ENABLED(CONFIG_64BIT))
+		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
+				  set_memory_ro);
+
+	debug_checkwx();
+}
+#else
+static __init pgprot_t pgprot_from_va(uintptr_t va)
+{
+	if (IS_ENABLED(CONFIG_64BIT) && !is_kernel_mapping(va))
+		return PAGE_KERNEL;
+
+	return PAGE_KERNEL_EXEC;
+}
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
 /*
  * setup_vm() is called from head.S with MMU-off.
  *
@@ -474,7 +511,7 @@ asmlinkage void __init __copy_data(void)
 #endif
 
 static uintptr_t load_pa __initdata;
-static uintptr_t load_sz __initdata;
+uintptr_t load_sz;
 #ifdef CONFIG_XIP_KERNEL
 #define load_pa        (*((uintptr_t *)XIP_FIXUP(&load_pa)))
 #define load_sz        (*((uintptr_t *)XIP_FIXUP(&load_sz)))
@@ -486,7 +523,8 @@ static uintptr_t xiprom_sz __initdata;
 #define xiprom_sz      (*((uintptr_t *)XIP_FIXUP(&xiprom_sz)))
 #define xiprom         (*((uintptr_t *)XIP_FIXUP(&xiprom)))
 
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    __always_unused bool early)
 {
 	uintptr_t va, end_va;
 
@@ -505,7 +543,8 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 				   map_size, PAGE_KERNEL);
 }
 #else
-static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
+static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size,
+					    bool early)
 {
 	uintptr_t va, end_va;
 
@@ -513,7 +552,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, uintptr_t map_size)
 	for (va = kernel_virt_addr; va < end_va; va += map_size)
 		create_pgd_mapping(pgdir, va,
 				   load_pa + (va - kernel_virt_addr),
-				   map_size, PAGE_KERNEL_EXEC);
+				   map_size,
+				   early ?
+					PAGE_KERNEL_EXEC : pgprot_from_va(va));
 }
 #endif
 
@@ -590,7 +631,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 	 * us to reach paging_init(). We map all memory banks later
 	 * in setup_vm_final() below.
 	 */
-	create_kernel_page_table(early_pg_dir, map_size);
+	create_kernel_page_table(early_pg_dir, map_size, true);
 
 #ifndef __PAGETABLE_PMD_FOLDED
 	/* Setup early PMD for DTB */
@@ -666,22 +707,6 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 #endif
 }
 
-#if defined(CONFIG_64BIT) && defined(CONFIG_STRICT_KERNEL_RWX)
-void __init protect_kernel_linear_mapping_text_rodata(void)
-{
-	unsigned long text_start = (unsigned long)lm_alias(_start);
-	unsigned long init_text_start = (unsigned long)lm_alias(__init_text_begin);
-	unsigned long rodata_start = (unsigned long)lm_alias(__start_rodata);
-	unsigned long data_start = (unsigned long)lm_alias(_data);
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_nx(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-}
-#endif
-
 static void __init setup_vm_final(void)
 {
 	uintptr_t va, map_size;
@@ -714,21 +739,15 @@ static void __init setup_vm_final(void)
 		map_size = best_map_size(start, end - start);
 		for (pa = start; pa < end; pa += map_size) {
 			va = (uintptr_t)__va(pa);
-			create_pgd_mapping(swapper_pg_dir, va, pa,
-					   map_size,
-#ifdef CONFIG_64BIT
-					   PAGE_KERNEL
-#else
-					   PAGE_KERNEL_EXEC
-#endif
-					);
 
+			create_pgd_mapping(swapper_pg_dir, va, pa, map_size,
+					   pgprot_from_va(va));
 		}
 	}
 
 #ifdef CONFIG_64BIT
 	/* Map the kernel */
-	create_kernel_page_table(swapper_pg_dir, PMD_SIZE);
+	create_kernel_page_table(swapper_pg_dir, PMD_SIZE, false);
 #endif
 
 	/* Clear fixmap PTE and PMD mappings */
@@ -759,35 +778,6 @@ static inline void setup_vm_final(void)
 }
 #endif /* CONFIG_MMU */
 
-#ifdef CONFIG_STRICT_KERNEL_RWX
-void __init protect_kernel_text_data(void)
-{
-	unsigned long text_start = (unsigned long)_start;
-	unsigned long init_text_start = (unsigned long)__init_text_begin;
-	unsigned long init_data_start = (unsigned long)__init_data_begin;
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-	unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
-
-	set_memory_ro(text_start, (init_text_start - text_start) >> PAGE_SHIFT);
-	set_memory_ro(init_text_start, (init_data_start - init_text_start) >> PAGE_SHIFT);
-	set_memory_nx(init_data_start, (rodata_start - init_data_start) >> PAGE_SHIFT);
-	/* rodata section is marked readonly in mark_rodata_ro */
-	set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-	set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
-}
-
-void mark_rodata_ro(void)
-{
-	unsigned long rodata_start = (unsigned long)__start_rodata;
-	unsigned long data_start = (unsigned long)_data;
-
-	set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
-
-	debug_checkwx();
-}
-#endif
-
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
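
Net effect in mm/init.c: the early setup_vm() pass still maps the kernel RWX (it runs before sections can be told apart at final granularity), setup_vm_final() then rebuilds the tables through pgprot_from_va() so the kernel gets its correct permissions the first time the swapper tables are used, and mark_rodata_ro() only has to handle rodata. A toy userspace model of that policy follows; the region bounds, the lm_alias() delta, and the string "pgprot" names are all stand-ins, not kernel values:

/* Toy model of the pgprot_from_va() policy; bounds and names below are
 * invented stand-ins for _start, __init_data_begin and lm_alias(). */
#include <stdio.h>

#define KTEXT_START	0x00200000UL	/* _start             (stand-in) */
#define KTEXT_END	0x00800000UL	/* __init_data_begin  (stand-in) */
#define LM_DELTA	0x10000000UL	/* lm_alias() offset  (stand-in) */

static int in_range(unsigned long va, unsigned long lo, unsigned long hi)
{
	return va >= lo && va < hi;
}

static const char *pgprot_from_va(unsigned long va)
{
	if (in_range(va, KTEXT_START, KTEXT_END))
		return "PAGE_KERNEL_READ_EXEC";	/* kernel text: X but no W */
	/* The linear-map alias of the text must lose both W and X. */
	if (in_range(va, KTEXT_START + LM_DELTA, KTEXT_END + LM_DELTA))
		return "PAGE_KERNEL_READ";
	return "PAGE_KERNEL";			/* everything else: RW, no X */
}

int main(void)
{
	unsigned long samples[] = {
		KTEXT_START + 0x1000,		 /* text itself */
		KTEXT_START + LM_DELTA + 0x1000, /* its linear-map alias */
		KTEXT_END + LM_DELTA + 0x1000,	 /* ordinary RAM */
	};

	for (int i = 0; i < 3; i++)
		printf("%#lx -> %s\n", samples[i], pgprot_from_va(samples[i]));
	return 0;
}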