linux/arch/riscv/include/asm/set_memory.h
Alexandre Ghiti e5c35fa040
riscv: Map the kernel with correct permissions the first time
For 64-bit kernels, we map the whole kernel with write and execute
permissions and afterwards remove writability from text and executability
from data.

For 32-bit kernels, the kernel mapping resides in the linear mapping, so we
map the whole linear mapping as writable and executable and afterwards
remove those permissions from unused memory and from the kernel mapping as
described above.

Change this behavior to directly map the kernel with correct permissions
and avoid going through the whole mapping to fix the permissions.

At the same time, this fixes an issue introduced by commit 2bfc6cd81b
("riscv: Move kernel mapping outside of linear mapping"), as reported
at https://github.com/starfive-tech/linux/issues/17.

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
2021-06-30 21:18:58 -07:00
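
As context for the set_kernel_memory() helper introduced below, here is a
hedged sketch (not the commit's verbatim diff; __init_begin/__init_end,
lm_alias(), IS_ENABLED() and free_initmem_default() are the standard kernel
symbols) of how a caller such as free_initmem() in arch/riscv/mm/init.c can
relax the permissions of a whole section in a single call once the kernel is
mapped with its final permissions:

/* Hedged sketch of a caller of set_kernel_memory(). */
void free_initmem(void)
{
	/*
	 * initmem was mapped with its final permissions at boot, so make it
	 * writable again before handing it back to the page allocator; on
	 * 32-bit the kernel sits in the linear mapping, so also drop the
	 * execute permission there.
	 */
	if (IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		set_kernel_memory(lm_alias(__init_begin), lm_alias(__init_end),
				  IS_ENABLED(CONFIG_64BIT) ?
					set_memory_rw : set_memory_rw_nx);

	free_initmem_default(POISON_FREE_INITMEM);
}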

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2019 SiFive
 */

#ifndef _ASM_RISCV_SET_MEMORY_H
#define _ASM_RISCV_SET_MEMORY_H

#ifndef __ASSEMBLY__
/*
 * Functions to change memory attributes.
 */
#ifdef CONFIG_MMU
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_rw_nx(unsigned long addr, int numpages);

static __always_inline int set_kernel_memory(char *startp, char *endp,
					     int (*set_memory)(unsigned long start,
							       int num_pages))
{
	unsigned long start = (unsigned long)startp;
	unsigned long end = (unsigned long)endp;
	int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;

	return set_memory(start, num_pages);
}
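
/*
 * Usage sketch (illustrative, not part of this header): the call sites in
 * arch/riscv/mm/init.c are expected to use the helper above along the
 * lines of
 *
 *	set_kernel_memory(__start_rodata, _data, set_memory_ro);
 *
 * which derives the page count from the section symbols and applies the
 * attribute change to the whole range.
 */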
#else
static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }

static inline int set_kernel_memory(char *startp, char *endp,
				    int (*set_memory)(unsigned long start,
						      int num_pages))
{
	return 0;
}
#endif

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
bool kernel_page_present(struct page *page);

#endif /* __ASSEMBLY__ */
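
/*
 * With STRICT_KERNEL_RWX, the kernel sections are aligned to the mapping
 * granularity so that each section can carry its own permissions: 2MB (a
 * megapage on 64-bit/Sv39) or 4MB (a megapage on 32-bit/Sv32). Without it,
 * L1_CACHE_BYTES alignment is sufficient.
 */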
#ifdef CONFIG_STRICT_KERNEL_RWX
#ifdef CONFIG_64BIT
#define SECTION_ALIGN (1 << 21)
#else
#define SECTION_ALIGN (1 << 22)
#endif
#else /* !CONFIG_STRICT_KERNEL_RWX */
#define SECTION_ALIGN L1_CACHE_BYTES
#endif /* CONFIG_STRICT_KERNEL_RWX */
#endif /* _ASM_RISCV_SET_MEMORY_H */