From f19c3b4239f5bfb69aacbaf75d4277c095e7aa7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:13:58 +0200 Subject: [PATCH 01/51] riscv: remove unused functions in traps_misaligned.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace macros by the only two function calls that are done from this file, store_u8() and load_u8(). Signed-off-by: Clément Léger Link: https://lore.kernel.org/r/20231004151405.521596-2-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/traps_misaligned.c | 48 +++++----------------------- 1 file changed, 8 insertions(+), 40 deletions(-) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 378f5b151443..e7bfb33089c1 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -151,51 +151,19 @@ #define PRECISION_S 0 #define PRECISION_D 1 -#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \ -static inline type load_##type(const type *addr) \ -{ \ - type val; \ - asm (#insn " %0, %1" \ - : "=&r" (val) : "m" (*addr)); \ - return val; \ -} - -#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \ -static inline void store_##type(type *addr, type val) \ -{ \ - asm volatile (#insn " %0, %1\n" \ - : : "r" (val), "m" (*addr)); \ -} - -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw) -#if defined(CONFIG_64BIT) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld) -DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld) -#else -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw) -DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw) - -static inline u64 load_u64(const u64 *addr) +static inline u8 load_u8(const u8 *addr) { - return load_u32((u32 *)addr) - + ((u64)load_u32((u32 *)addr + 1) << 32); + u8 val; + + asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr)); + + return val; } -static inline void store_u64(u64 *addr, u64 val) +static inline void store_u8(u8 *addr, u8 val) { - store_u32((u32 *)addr, val); - store_u32((u32 *)addr + 1, val >> 32); + asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr)); } -#endif static inline ulong get_insn(ulong mepc) { From 7c83232161f609bbc452a1255f823f41afc411dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:13:59 +0200 Subject: [PATCH 02/51] riscv: add support for misaligned trap handling in S-mode MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Misalignment trap handling is only supported for M-mode and uses direct accesses to user memory. In S-mode, when handling usermode fault, this requires to use the get_user()/put_user() accessors. Implement load_u8(), store_u8() and get_insn() using these accessors for userspace and direct text access for kernel. 
Signed-off-by: Clément Léger Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20231004151405.521596-3-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 8 ++ arch/riscv/include/asm/entry-common.h | 14 +++ arch/riscv/kernel/Makefile | 2 +- arch/riscv/kernel/traps.c | 9 -- arch/riscv/kernel/traps_misaligned.c | 119 +++++++++++++++++++++++--- 5 files changed, 129 insertions(+), 23 deletions(-) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index d607ab0f7c6d..6e167358a897 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -636,6 +636,14 @@ config THREAD_SIZE_ORDER Specify the Pages of thread stack size (from 4KB to 64KB), which also affects irq stack size, which is equal to thread stack size. +config RISCV_MISALIGNED + bool "Support misaligned load/store traps for kernel and userspace" + default y + help + Say Y here if you want the kernel to embed support for misaligned + load/store for both kernel and userspace. When disable, misaligned + accesses will generate SIGBUS in userspace and panic in kernel. + endmenu # "Platform type" menu "Kernel features" diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 6e4dee49d84b..7ab5e34318c8 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -8,4 +8,18 @@ void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); +#ifdef CONFIG_RISCV_MISALIGNED +int handle_misaligned_load(struct pt_regs *regs); +int handle_misaligned_store(struct pt_regs *regs); +#else +static inline int handle_misaligned_load(struct pt_regs *regs) +{ + return -1; +} +static inline int handle_misaligned_store(struct pt_regs *regs) +{ + return -1; +} +#endif + #endif /* _ASM_RISCV_ENTRY_COMMON_H */ diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 95cf25d48405..0d874fb24b51 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -59,7 +59,7 @@ obj-y += patch.o obj-y += probes/ obj-$(CONFIG_MMU) += vdso.o vdso/ -obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o +obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_RISCV_ISA_V) += vector.o obj-$(CONFIG_SMP) += smpboot.o diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 19807c4d3805..d69779e4b967 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -179,14 +179,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re DO_ERROR_INFO(do_trap_load_fault, SIGSEGV, SEGV_ACCERR, "load access fault"); -#ifndef CONFIG_RISCV_M_MODE -DO_ERROR_INFO(do_trap_load_misaligned, - SIGBUS, BUS_ADRALN, "Oops - load address misaligned"); -DO_ERROR_INFO(do_trap_store_misaligned, - SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned"); -#else -int handle_misaligned_load(struct pt_regs *regs); -int handle_misaligned_store(struct pt_regs *regs); asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs) { @@ -229,7 +221,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs irqentry_nmi_exit(regs, state); } } -#endif DO_ERROR_INFO(do_trap_store_fault, SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault"); DO_ERROR_INFO(do_trap_ecall_s, diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index e7bfb33089c1..9daed7d756ae 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -12,6 +12,7 @@ #include 
#include #include +#include #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -151,21 +152,25 @@ #define PRECISION_S 0 #define PRECISION_D 1 -static inline u8 load_u8(const u8 *addr) +#ifdef CONFIG_RISCV_M_MODE +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { u8 val; asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr)); + *r_val = val; - return val; + return 0; } -static inline void store_u8(u8 *addr, u8 val) +static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) { asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr)); + + return 0; } -static inline ulong get_insn(ulong mepc) +static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn) { register ulong __mepc asm ("a2") = mepc; ulong val, rvc_mask = 3, tmp; @@ -194,8 +199,86 @@ static inline ulong get_insn(ulong mepc) : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask), [xlen_minus_16] "i" (XLEN_MINUS_16)); - return val; + *r_insn = val; + + return 0; } +#else +static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) +{ + if (user_mode(regs)) { + return __get_user(*r_val, addr); + } else { + *r_val = *addr; + return 0; + } +} + +static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) +{ + if (user_mode(regs)) { + return __put_user(val, addr); + } else { + *addr = val; + return 0; + } +} + +#define __read_insn(regs, insn, insn_addr) \ +({ \ + int __ret; \ + \ + if (user_mode(regs)) { \ + __ret = __get_user(insn, insn_addr); \ + } else { \ + insn = *insn_addr; \ + __ret = 0; \ + } \ + \ + __ret; \ +}) + +static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn) +{ + ulong insn = 0; + + if (epc & 0x2) { + ulong tmp = 0; + u16 __user *insn_addr = (u16 __user *)epc; + + if (__read_insn(regs, insn, insn_addr)) + return -EFAULT; + /* __get_user() uses regular "lw" which sign extend the loaded + * value make sure to clear higher order bits in case we "or" it + * below with the upper 16 bits half. 
+ */ + insn &= GENMASK(15, 0); + if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) { + *r_insn = insn; + return 0; + } + insn_addr++; + if (__read_insn(regs, tmp, insn_addr)) + return -EFAULT; + *r_insn = (tmp << 16) | insn; + + return 0; + } else { + u32 __user *insn_addr = (u32 __user *)epc; + + if (__read_insn(regs, insn, insn_addr)) + return -EFAULT; + if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) { + *r_insn = insn; + return 0; + } + insn &= GENMASK(15, 0); + *r_insn = insn; + + return 0; + } +} +#endif union reg_data { u8 data_bytes[8]; @@ -207,10 +290,13 @@ int handle_misaligned_load(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; - unsigned long insn = get_insn(epc); - unsigned long addr = csr_read(mtval); + unsigned long insn; + unsigned long addr = regs->badaddr; int i, fp = 0, shift = 0, len = 0; + if (get_insn(regs, epc, &insn)) + return -1; + regs->epc = 0; if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) { @@ -274,8 +360,10 @@ int handle_misaligned_load(struct pt_regs *regs) } val.data_u64 = 0; - for (i = 0; i < len; i++) - val.data_bytes[i] = load_u8((void *)(addr + i)); + for (i = 0; i < len; i++) { + if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i])) + return -1; + } if (fp) return -1; @@ -290,10 +378,13 @@ int handle_misaligned_store(struct pt_regs *regs) { union reg_data val; unsigned long epc = regs->epc; - unsigned long insn = get_insn(epc); - unsigned long addr = csr_read(mtval); + unsigned long insn; + unsigned long addr = regs->badaddr; int i, len = 0; + if (get_insn(regs, epc, &insn)) + return -1; + regs->epc = 0; val.data_ulong = GET_RS2(insn, regs); @@ -327,8 +418,10 @@ int handle_misaligned_store(struct pt_regs *regs) return -1; } - for (i = 0; i < len; i++) - store_u8((void *)(addr + i), val.data_bytes[i]); + for (i = 0; i < len; i++) { + if (store_u8(regs, (void *)(addr + i), val.data_bytes[i])) + return -1; + } regs->epc = epc + INSN_LEN(insn); From 89c12fecdc4d46c1f08a81dab5d305304cc626eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:00 +0200 Subject: [PATCH 03/51] riscv: report perf event for misaligned fault MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add missing calls to account for misaligned fault event using perf_sw_event(). 
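For reference (an illustrative sketch, not part of this patch), the PERF_COUNT_SW_ALIGNMENT_FAULTS software event accounted here is the same one userspace can read through perf_event_open() (the perf tool exposes it as "alignment-faults"). A minimal example, assuming the standard perf_event_open() ABI:

#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_ALIGNMENT_FAULTS;

	/* pid == 0, cpu == -1: count alignment faults taken by this task. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/* ... run code that performs misaligned accesses ... */

	read(fd, &count, sizeof(count));
	printf("alignment-faults: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}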
Signed-off-by: Clément Léger Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20231004151405.521596-4-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/traps_misaligned.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 9daed7d756ae..804f6c5e0e44 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -294,6 +295,8 @@ int handle_misaligned_load(struct pt_regs *regs) unsigned long addr = regs->badaddr; int i, fp = 0, shift = 0, len = 0; + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (get_insn(regs, epc, &insn)) return -1; @@ -382,6 +385,8 @@ int handle_misaligned_store(struct pt_regs *regs) unsigned long addr = regs->badaddr; int i, len = 0; + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (get_insn(regs, epc, &insn)) return -1; From 7c586a555a48a952f64d883d2f20402fb61d9164 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:01 +0200 Subject: [PATCH 04/51] riscv: add floating point insn support to misaligned access emulation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This support is partially based of openSBI misaligned emulation floating point instruction support. It provides support for the existing floating point instructions (both for 32/64 bits as well as compressed ones). Since floating point registers are not part of the pt_regs struct, we need to modify them directly using some assembly. We also dirty the pt_regs status in case we modify them to be sure context switch will save FP state. With this support, Linux is on par with openSBI support. 
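For testing, a misaligned FP access can be provoked from C along these lines. This is a hedged sketch: whether the compiler emits a plain fld for the cast, rather than splitting the access, depends on the toolchain and flags, and the cast is used here only to exercise the trap path.

#include <string.h>

/*
 * Dereference a deliberately misaligned pointer; on RISC-V this is
 * typically compiled to a single fld, which traps when the address is
 * not naturally aligned and is then emulated by handle_misaligned_load().
 */
static double load_misaligned_double(const void *p)
{
	return *(const double *)p;
}

double test_misaligned_fld(void)
{
	char buf[16];
	double d = 3.14;

	memcpy(buf + 1, &d, sizeof(d));	/* value now lives at an odd address */
	return load_misaligned_double(buf + 1);
}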
Signed-off-by: Clément Léger Link: https://lore.kernel.org/r/20231004151405.521596-5-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/fpu.S | 121 +++++++++++++++++++++ arch/riscv/kernel/traps_misaligned.c | 152 ++++++++++++++++++++++++++- 2 files changed, 269 insertions(+), 4 deletions(-) diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index dd2205473de7..5dd3161a4dac 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -104,3 +104,124 @@ ENTRY(__fstate_restore) csrc CSR_STATUS, t1 ret ENDPROC(__fstate_restore) + +#define get_f32(which) fmv.x.s a0, which; j 2f +#define put_f32(which) fmv.s.x which, a1; j 2f +#if __riscv_xlen == 64 +# define get_f64(which) fmv.x.d a0, which; j 2f +# define put_f64(which) fmv.d.x which, a1; j 2f +#else +# define get_f64(which) fsd which, 0(a1); j 2f +# define put_f64(which) fld which, 0(a1); j 2f +#endif + +.macro fp_access_prologue + /* + * Compute jump offset to store the correct FP register since we don't + * have indirect FP register access + */ + sll t0, a0, 3 + la t2, 1f + add t0, t0, t2 + li t1, SR_FS + csrs CSR_STATUS, t1 + jr t0 +1: +.endm + +.macro fp_access_epilogue +2: + csrc CSR_STATUS, t1 + ret +.endm + +#define fp_access_body(__access_func) \ + __access_func(f0); \ + __access_func(f1); \ + __access_func(f2); \ + __access_func(f3); \ + __access_func(f4); \ + __access_func(f5); \ + __access_func(f6); \ + __access_func(f7); \ + __access_func(f8); \ + __access_func(f9); \ + __access_func(f10); \ + __access_func(f11); \ + __access_func(f12); \ + __access_func(f13); \ + __access_func(f14); \ + __access_func(f15); \ + __access_func(f16); \ + __access_func(f17); \ + __access_func(f18); \ + __access_func(f19); \ + __access_func(f20); \ + __access_func(f21); \ + __access_func(f22); \ + __access_func(f23); \ + __access_func(f24); \ + __access_func(f25); \ + __access_func(f26); \ + __access_func(f27); \ + __access_func(f28); \ + __access_func(f29); \ + __access_func(f30); \ + __access_func(f31) + + +#ifdef CONFIG_RISCV_MISALIGNED + +/* + * Disable compressed instructions set to keep a constant offset between FP + * load/store/move instructions + */ +.option norvc +/* + * put_f32_reg - Set a FP register from a register containing the value + * a0 = FP register index to be set + * a1 = value to be loaded in the FP register + */ +SYM_FUNC_START(put_f32_reg) + fp_access_prologue + fp_access_body(put_f32) + fp_access_epilogue +SYM_FUNC_END(put_f32_reg) + +/* + * get_f32_reg - Get a FP register value and return it + * a0 = FP register index to be retrieved + */ +SYM_FUNC_START(get_f32_reg) + fp_access_prologue + fp_access_body(get_f32) + fp_access_epilogue +SYM_FUNC_END(get_f32_reg) + +/* + * put_f64_reg - Set a 64 bits FP register from a value or a pointer. + * a0 = FP register index to be set + * a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits, we + * load the value to a pointer). + */ +SYM_FUNC_START(put_f64_reg) + fp_access_prologue + fp_access_body(put_f64) + fp_access_epilogue +SYM_FUNC_END(put_f64_reg) + +/* + * put_f64_reg - Get a 64 bits FP register value and returned it or store it to + * a pointer. + * a0 = FP register index to be retrieved + * a1 = If xlen == 32, pointer which should be loaded with the FP register value + * or unused if xlen == 64. 
In which case the FP register value is returned + * through a0 + */ +SYM_FUNC_START(get_f64_reg) + fp_access_prologue + fp_access_body(get_f64) + fp_access_epilogue +SYM_FUNC_END(get_f64_reg) + +#endif /* CONFIG_RISCV_MISALIGNED */ diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 804f6c5e0e44..041fd2dbd955 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -153,6 +153,115 @@ #define PRECISION_S 0 #define PRECISION_D 1 +#ifdef CONFIG_FPU + +#define FP_GET_RD(insn) (insn >> 7 & 0x1F) + +extern void put_f32_reg(unsigned long fp_reg, unsigned long value); + +static int set_f32_rd(unsigned long insn, struct pt_regs *regs, + unsigned long val) +{ + unsigned long fp_reg = FP_GET_RD(insn); + + put_f32_reg(fp_reg, val); + regs->status |= SR_FS_DIRTY; + + return 0; +} + +extern void put_f64_reg(unsigned long fp_reg, unsigned long value); + +static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) +{ + unsigned long fp_reg = FP_GET_RD(insn); + unsigned long value; + +#if __riscv_xlen == 32 + value = (unsigned long) &val; +#else + value = val; +#endif + put_f64_reg(fp_reg, value); + regs->status |= SR_FS_DIRTY; + + return 0; +} + +#if __riscv_xlen == 32 +extern void get_f64_reg(unsigned long fp_reg, u64 *value); + +static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F; + u64 val; + + get_f64_reg(fp_reg, &val); + regs->status |= SR_FS_DIRTY; + + return val; +} +#else + +extern unsigned long get_f64_reg(unsigned long fp_reg); + +static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F; + unsigned long val; + + val = get_f64_reg(fp_reg); + regs->status |= SR_FS_DIRTY; + + return val; +} + +#endif + +extern unsigned long get_f32_reg(unsigned long fp_reg); + +static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F; + unsigned long val; + + val = get_f32_reg(fp_reg); + regs->status |= SR_FS_DIRTY; + + return val; +} + +#else /* CONFIG_FPU */ +static void set_f32_rd(unsigned long insn, struct pt_regs *regs, + unsigned long val) {} + +static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {} + +static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + return 0; +} + +static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset, + struct pt_regs *regs) +{ + return 0; +} + +#endif + +#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs)) +#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs)) +#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs)) + +#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs)) +#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs)) +#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs)) + #ifdef CONFIG_RISCV_M_MODE static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { @@ -362,15 +471,21 @@ int handle_misaligned_load(struct pt_regs *regs) return -1; } + if (!IS_ENABLED(CONFIG_FPU) && fp) + return -EOPNOTSUPP; + val.data_u64 = 0; for (i = 0; i < len; i++) { if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i])) return -1; } - if (fp) - return -1; - SET_RD(insn, regs, val.data_ulong << shift >> shift); + if (!fp) + SET_RD(insn, regs, val.data_ulong << shift 
>> shift); + else if (len == 8) + set_f64_rd(insn, regs, val.data_u64); + else + set_f32_rd(insn, regs, val.data_ulong); regs->epc = epc + INSN_LEN(insn); @@ -383,7 +498,7 @@ int handle_misaligned_store(struct pt_regs *regs) unsigned long epc = regs->epc; unsigned long insn; unsigned long addr = regs->badaddr; - int i, len = 0; + int i, len = 0, fp = 0; perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); @@ -400,6 +515,14 @@ int handle_misaligned_store(struct pt_regs *regs) } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) { len = 8; #endif + } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) { + fp = 1; + len = 8; + val.data_u64 = GET_F64_RS2(insn, regs); + } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) { + fp = 1; + len = 4; + val.data_ulong = GET_F32_RS2(insn, regs); } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) { len = 2; #if defined(CONFIG_64BIT) @@ -418,11 +541,32 @@ int handle_misaligned_store(struct pt_regs *regs) ((insn >> SH_RD) & 0x1f)) { len = 4; val.data_ulong = GET_RS2C(insn, regs); + } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) { + fp = 1; + len = 8; + val.data_u64 = GET_F64_RS2S(insn, regs); + } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) { + fp = 1; + len = 8; + val.data_u64 = GET_F64_RS2C(insn, regs); +#if !defined(CONFIG_64BIT) + } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) { + fp = 1; + len = 4; + val.data_ulong = GET_F32_RS2S(insn, regs); + } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) { + fp = 1; + len = 4; + val.data_ulong = GET_F32_RS2C(insn, regs); +#endif } else { regs->epc = epc; return -1; } + if (!IS_ENABLED(CONFIG_FPU) && fp) + return -EOPNOTSUPP; + for (i = 0; i < len; i++) { if (store_u8(regs, (void *)(addr + i), val.data_bytes[i])) return -1; From bc38f61313d316d74c16ce7287d6dba2f42502c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:02 +0200 Subject: [PATCH 05/51] riscv: add support for sysctl unaligned_enabled control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This sysctl tuning option allows the user to disable misaligned access handling globally on the system. This will also be used by misaligned detection code to temporarily disable misaligned access handling. 
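For context (not introduced by this patch): selecting SYSCTL_ARCH_UNALIGN_ALLOW hooks the unaligned_enabled flag into the generic "unaligned-trap" sysctl, so the handling can be toggled at runtime, for example:

# illustrative usage, assuming the generic kernel "unaligned-trap" sysctl
echo 0 > /proc/sys/kernel/unaligned-trap    # handlers bail out: SIGBUS in userspace, die in kernel
echo 1 > /proc/sys/kernel/unaligned-trap    # emulation enabled (default)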
Signed-off-by: Clément Léger Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20231004151405.521596-6-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 1 + arch/riscv/kernel/traps_misaligned.c | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 6e167358a897..1313f83bb0cb 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -638,6 +638,7 @@ config THREAD_SIZE_ORDER config RISCV_MISALIGNED bool "Support misaligned load/store traps for kernel and userspace" + select SYSCTL_ARCH_UNALIGN_ALLOW default y help Say Y here if you want the kernel to embed support for misaligned diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 041fd2dbd955..b5fb1ff078e3 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -396,6 +396,9 @@ union reg_data { u64 data_u64; }; +/* sysctl hooks */ +int unaligned_enabled __read_mostly = 1; /* Enabled by default */ + int handle_misaligned_load(struct pt_regs *regs) { union reg_data val; @@ -406,6 +409,9 @@ int handle_misaligned_load(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (!unaligned_enabled) + return -1; + if (get_insn(regs, epc, &insn)) return -1; @@ -502,6 +508,9 @@ int handle_misaligned_store(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + if (!unaligned_enabled) + return -1; + if (get_insn(regs, epc, &insn)) return -1; From 90b11b470b2e88ff583e04be109e6441cb69f54d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:03 +0200 Subject: [PATCH 06/51] riscv: annotate check_unaligned_access_boot_cpu() with __init MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This function is solely called as an initcall, thus annotate it with __init. Signed-off-by: Clément Léger Reviewed-by: Evan Green Link: https://lore.kernel.org/r/20231004151405.521596-7-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpufeature.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 1cfbba65d11a..356e5677eeb1 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -645,7 +645,7 @@ out: __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); } -static int check_unaligned_access_boot_cpu(void) +static int __init check_unaligned_access_boot_cpu(void) { check_unaligned_access(0); return 0; From 71c54b3d169db5569655cbd2a3616bc701fd5eec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:04 +0200 Subject: [PATCH 07/51] riscv: report misaligned accesses emulation to hwprobe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hwprobe provides a way to report if misaligned access are emulated. In order to correctly populate that feature, we can check if it actually traps when doing a misaligned access. This can be checked using an exception table entry which will actually be used when a misaligned access is done from kernel mode. 
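Once the probe has run, userspace can observe the result through the riscv_hwprobe() syscall. A minimal sketch (illustrative only, assuming the uapi <asm/hwprobe.h> header and a libc whose headers define __NR_riscv_hwprobe):

#include <asm/hwprobe.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* cpu_count == 0 and cpus == NULL: query across all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_EMULATED)
		printf("misaligned accesses are emulated by the kernel\n");

	return 0;
}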
Signed-off-by: Clément Léger Link: https://lore.kernel.org/r/20231004151405.521596-8-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 18 +++++++++ arch/riscv/kernel/cpufeature.c | 4 ++ arch/riscv/kernel/smpboot.c | 2 +- arch/riscv/kernel/traps_misaligned.c | 56 ++++++++++++++++++++++++++++ 4 files changed, 79 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index d0345bd659c9..e4ae6af51876 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -32,4 +32,22 @@ extern struct riscv_isainfo hart_isa[NR_CPUS]; void check_unaligned_access(int cpu); +#ifdef CONFIG_RISCV_MISALIGNED +bool unaligned_ctl_available(void); +bool check_unaligned_access_emulated(int cpu); +void unaligned_emulation_finish(void); +#else +static inline bool unaligned_ctl_available(void) +{ + return false; +} + +static inline bool check_unaligned_access_emulated(int cpu) +{ + return false; +} + +static inline void unaligned_emulation_finish(void) {} +#endif + #endif diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 356e5677eeb1..fbbde800bc21 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -568,6 +568,9 @@ void check_unaligned_access(int cpu) void *src; long speed = RISCV_HWPROBE_MISALIGNED_SLOW; + if (check_unaligned_access_emulated(cpu)) + return; + page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE)); if (!page) { pr_warn("Can't alloc pages to measure memcpy performance"); @@ -648,6 +651,7 @@ out: static int __init check_unaligned_access_boot_cpu(void) { check_unaligned_access(0); + unaligned_emulation_finish(); return 0; } diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 1b8da4e40a4d..5d9858d6ad26 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -245,8 +245,8 @@ asmlinkage __visible void smp_callin(void) riscv_ipi_enable(); numa_add_cpu(curr_cpuid); - set_cpu_online(curr_cpuid, 1); check_unaligned_access(curr_cpuid); + set_cpu_online(curr_cpuid, 1); if (has_vector()) { if (riscv_v_setup_vsize()) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index b5fb1ff078e3..d99b95084b6c 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -14,6 +14,8 @@ #include #include #include +#include +#include #define INSN_MATCH_LB 0x3 #define INSN_MASK_LB 0x707f @@ -396,6 +398,8 @@ union reg_data { u64 data_u64; }; +static bool unaligned_ctl __read_mostly; + /* sysctl hooks */ int unaligned_enabled __read_mostly = 1; /* Enabled by default */ @@ -409,6 +413,8 @@ int handle_misaligned_load(struct pt_regs *regs) perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); + *this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED; + if (!unaligned_enabled) return -1; @@ -585,3 +591,53 @@ int handle_misaligned_store(struct pt_regs *regs) return 0; } + +bool check_unaligned_access_emulated(int cpu) +{ + long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu); + unsigned long tmp_var, tmp_val; + bool misaligned_emu_detected; + + *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN; + + __asm__ __volatile__ ( + " "REG_L" %[tmp], 1(%[ptr])\n" + : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory"); + + misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED); + /* + * If unaligned_ctl is already set, this means that we detected that all + * CPUS uses 
emulated misaligned access at boot time. If that changed + * when hotplugging the new cpu, this is something we don't handle. + */ + if (unlikely(unaligned_ctl && !misaligned_emu_detected)) { + pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n"); + while (true) + cpu_relax(); + } + + return misaligned_emu_detected; +} + +void __init unaligned_emulation_finish(void) +{ + int cpu; + + /* + * We can only support PR_UNALIGN controls if all CPUs have misaligned + * accesses emulated since tasks requesting such control can run on any + * CPU. + */ + for_each_present_cpu(cpu) { + if (per_cpu(misaligned_access_speed, cpu) != + RISCV_HWPROBE_MISALIGNED_EMULATED) { + return; + } + } + unaligned_ctl = true; +} + +bool unaligned_ctl_available(void) +{ + return unaligned_ctl; +} From 9f23a5d2f6b01c2ab91d791109731a0d87ec2239 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Wed, 4 Oct 2023 17:14:05 +0200 Subject: [PATCH 08/51] riscv: add support for PR_SET_UNALIGN and PR_GET_UNALIGN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that trap support is ready to handle misalignment errors in S-mode, allow the user to control the behavior of misaligned accesses using prctl(PR_SET_UNALIGN). Add an align_ctl flag in thread_struct which will be used to determine if we should SIGBUS the process or not on such fault. Signed-off-by: Clément Léger Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20231004151405.521596-9-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/processor.h | 9 +++++++++ arch/riscv/kernel/process.c | 18 ++++++++++++++++++ arch/riscv/kernel/traps_misaligned.c | 6 ++++++ 3 files changed, 33 insertions(+) diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index 3e23e1786d05..adbe520d07c5 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -8,6 +8,7 @@ #include #include +#include #include @@ -82,6 +83,7 @@ struct thread_struct { unsigned long bad_cause; unsigned long vstate_ctrl; struct __riscv_v_ext_state vstate; + unsigned long align_ctl; }; /* Whitelist the fstate from the task_struct for hardened usercopy */ @@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset, #define INIT_THREAD { \ .sp = sizeof(init_stack) + (long)&init_stack, \ + .align_ctl = PR_UNALIGN_NOPRINT, \ } #define task_pt_regs(tsk) \ @@ -134,6 +137,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg); extern long riscv_v_vstate_ctrl_get_current(void); #endif /* CONFIG_RISCV_ISA_V */ +extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr); +extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); + +#define GET_UNALIGN_CTL(tsk, addr) get_unalign_ctl((tsk), (addr)) +#define SET_UNALIGN_CTL(tsk, val) set_unalign_ctl((tsk), (val)) + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index e32d737e039f..4f21d970a129 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -25,6 +25,7 @@ #include #include #include +#include register unsigned long gp_in_global __asm__("gp"); @@ -41,6 +42,23 @@ void arch_cpu_idle(void) cpu_do_idle(); } +int set_unalign_ctl(struct task_struct *tsk, unsigned int val) +{ + if (!unaligned_ctl_available()) + return -EINVAL; + + tsk->thread.align_ctl = val; + return 0; +} + +int get_unalign_ctl(struct task_struct *tsk, 
unsigned long adr) +{ + if (!unaligned_ctl_available()) + return -EINVAL; + + return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr); +} + void __show_regs(struct pt_regs *regs) { show_regs_print_info(KERN_DEFAULT); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index d99b95084b6c..bba301b5194d 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -418,6 +418,9 @@ int handle_misaligned_load(struct pt_regs *regs) if (!unaligned_enabled) return -1; + if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS)) + return -1; + if (get_insn(regs, epc, &insn)) return -1; @@ -517,6 +520,9 @@ int handle_misaligned_store(struct pt_regs *regs) if (!unaligned_enabled) return -1; + if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS)) + return -1; + if (get_insn(regs, epc, &insn)) return -1; From ce4f78f1b53d3327fbd32764aa333bf05fb68818 Mon Sep 17 00:00:00 2001 From: Haorong Lu Date: Thu, 3 Aug 2023 15:44:54 -0700 Subject: [PATCH 09/51] riscv: signal: handle syscall restart before get_signal In the current riscv implementation, blocking syscalls like read() may not correctly restart after being interrupted by ptrace. This problem arises when the syscall restart process in arch_do_signal_or_restart() is bypassed due to changes to the regs->cause register, such as an ebreak instruction. Steps to reproduce: 1. Interrupt the tracee process with PTRACE_SEIZE & PTRACE_INTERRUPT. 2. Backup original registers and instruction at new_pc. 3. Change pc to new_pc, and inject an instruction (like ebreak) to this address. 4. Resume with PTRACE_CONT and wait for the process to stop again after executing ebreak. 5. Restore original registers and instructions, and detach from the tracee process. 6. Now the read() syscall in tracee will return -1 with errno set to ERESTARTSYS. Specifically, during an interrupt, the regs->cause changes from EXC_SYSCALL to EXC_BREAKPOINT due to the injected ebreak, which is inaccessible via ptrace so we cannot restore it. This alteration breaks the syscall restart condition and ends the read() syscall with an ERESTARTSYS error. According to include/linux/errno.h, it should never be seen by user programs. X86 can avoid this issue as it checks the syscall condition using a register (orig_ax) exposed to user space. Arm64 handles syscall restart before calling get_signal, where it could be paused and inspected by ptrace/debugger. This patch adjusts the riscv implementation to arm64 style, which also checks syscall using a kernel register (syscallno). It ensures the syscall restart process is not bypassed when changes to the cause register occur, providing more consistent behavior across various architectures. For a simplified reproduction program, feel free to visit: https://github.com/ancientmodern/riscv-ptrace-bug-demo. Signed-off-by: Haorong Lu Link: https://lore.kernel.org/r/20230803224458.4156006-1-ancientmodern4@gmail.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/signal.c | 95 ++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 44 deletions(-) diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 180d951d3624..d2d7169048ea 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -391,30 +391,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) sigset_t *oldset = sigmask_to_save(); int ret; - /* Are we from a system call? 
*/ - if (regs->cause == EXC_SYSCALL) { - /* Avoid additional syscall restarting via ret_from_exception */ - regs->cause = -1UL; - /* If so, check system call restarting.. */ - switch (regs->a0) { - case -ERESTART_RESTARTBLOCK: - case -ERESTARTNOHAND: - regs->a0 = -EINTR; - break; - - case -ERESTARTSYS: - if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { - regs->a0 = -EINTR; - break; - } - fallthrough; - case -ERESTARTNOINTR: - regs->a0 = regs->orig_a0; - regs->epc -= 0x4; - break; - } - } - rseq_signal_deliver(ksig, regs); /* Set up the stack frame */ @@ -428,34 +404,65 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) void arch_do_signal_or_restart(struct pt_regs *regs) { + unsigned long continue_addr = 0, restart_addr = 0; + int retval = 0; struct ksignal ksig; + bool syscall = (regs->cause == EXC_SYSCALL); + /* If we were from a system call, check for system call restarting */ + if (syscall) { + continue_addr = regs->epc; + restart_addr = continue_addr - 4; + retval = regs->a0; + + /* Avoid additional syscall restarting via ret_from_exception */ + regs->cause = -1UL; + + /* + * Prepare for system call restart. We do this here so that a + * debugger will see the already changed PC. + */ + switch (retval) { + case -ERESTARTNOHAND: + case -ERESTARTSYS: + case -ERESTARTNOINTR: + case -ERESTART_RESTARTBLOCK: + regs->a0 = regs->orig_a0; + regs->epc = restart_addr; + break; + } + } + + /* + * Get the signal to deliver. When running under ptrace, at this point + * the debugger may change all of our registers. + */ if (get_signal(&ksig)) { + /* + * Depending on the signal settings, we may need to revert the + * decision to restart the system call, but skip this if a + * debugger has chosen to restart at a different PC. + */ + if (regs->epc == restart_addr && + (retval == -ERESTARTNOHAND || + retval == -ERESTART_RESTARTBLOCK || + (retval == -ERESTARTSYS && + !(ksig.ka.sa.sa_flags & SA_RESTART)))) { + regs->a0 = -EINTR; + regs->epc = continue_addr; + } + /* Actually deliver the signal */ handle_signal(&ksig, regs); return; } - /* Did we come from a system call? */ - if (regs->cause == EXC_SYSCALL) { - /* Avoid additional syscall restarting via ret_from_exception */ - regs->cause = -1UL; - - /* Restart the system call - no handlers present */ - switch (regs->a0) { - case -ERESTARTNOHAND: - case -ERESTARTSYS: - case -ERESTARTNOINTR: - regs->a0 = regs->orig_a0; - regs->epc -= 0x4; - break; - case -ERESTART_RESTARTBLOCK: - regs->a0 = regs->orig_a0; - regs->a7 = __NR_restart_syscall; - regs->epc -= 0x4; - break; - } - } + /* + * Handle restarting a different system call. As above, if a debugger + * has chosen to restart at a different PC, ignore the restart. + */ + if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK) + regs->a7 = __NR_restart_syscall; /* * If there is no signal to deliver, we just put the saved From 07863871dfb162965b21bb8c2e4861bdb0019da3 Mon Sep 17 00:00:00 2001 From: Jinyu Tang Date: Tue, 12 Sep 2023 21:31:28 +0800 Subject: [PATCH 10/51] riscv: defconfig : add CONFIG_MMC_DW for starfive If these config not set, mmc can't run for jh7110, rootfs can't be found when using SD card. So set CONFIG_MMC_DW=y like arm64 defconfig, and set CONFIG_MMC_DW_STARFIVE=y for starfive. Then starfive vf2 board can start SD card rootfs with mainline defconfig and dtb. 
Signed-off-by: Jinyu Tang Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20230912133128.5247-1-tangjinyu@tinylab.org Signed-off-by: Palmer Dabbelt --- arch/riscv/configs/defconfig | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 1e81c865a271..d56d52e2ef87 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -214,6 +214,8 @@ CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_CADENCE=y CONFIG_MMC_SPI=y +CONFIG_MMC_DW=y +CONFIG_MMC_DW_STARFIVE=y CONFIG_MMC_SDHI=y CONFIG_MMC_SUNXI=y CONFIG_RTC_CLASS=y From c20d36cc2a2073d4cdcda92bd7a1bb9b3b3b7c79 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 12 Sep 2023 23:40:40 +0800 Subject: [PATCH 11/51] riscv: don't probe unaligned access speed if already done If misaligned_access_speed percpu var isn't so called "HWPROBE MISALIGNED UNKNOWN", it means the probe has happened(this is possible for example, hotplug off then hotplug on one cpu), and the percpu var has been set, don't probe again in this case. Signed-off-by: Jisheng Zhang Fixes: 584ea6564bca ("RISC-V: Probe for unaligned access speed") Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20230912154040.3306-1-jszhang@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpufeature.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 970a71422bee..6a01ded615cd 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -572,6 +572,10 @@ void check_unaligned_access(int cpu) if (check_unaligned_access_emulated(cpu)) return; + /* We are already set since the last check */ + if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) + return; + page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE)); if (!page) { pr_warn("Can't alloc pages to measure memcpy performance"); From d3eabf2f2c81f8b0de0b75c6f41b875c501eb33b Mon Sep 17 00:00:00 2001 From: Conor Dooley Date: Fri, 15 Sep 2023 16:40:44 +0100 Subject: [PATCH 12/51] RISC-V: capitalise CMO op macros The CMO op macros initially used lower case, as the original iteration of the ALT_CMO_OP alternative stringified the first parameter to finalise the assembly for the standard variant. As a knock-on, the T-Head versions of these CMOs had to use mixed case defines. Commit dd23e9535889 ("RISC-V: replace cbom instructions with an insn-def") removed the asm construction with stringify, replacing it an insn-def macro, rending the lower-case surplus to requirements. As far as I can tell from a brief check, CBO_zero does not see similar use and didn't require the mixed case define in the first place. Replace the lower case characters now for consistency with other insn-def macros in the standard and T-Head forms, and adjust the callsites. 
Suggested-by: Andrew Jones Signed-off-by: Conor Dooley Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20230915-aloe-dollar-994937477776@spud Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/errata_list.h | 6 +++--- arch/riscv/include/asm/insn-def.h | 8 +++---- arch/riscv/lib/clear_page.S | 32 ++++++++++++++-------------- arch/riscv/mm/dma-noncoherent.c | 8 +++---- arch/riscv/mm/pmem.c | 4 ++-- 5 files changed, 29 insertions(+), 29 deletions(-) diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 0ac18a4135be..83ed25e43553 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -117,9 +117,9 @@ asm volatile(ALTERNATIVE( \ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | * 0000000 11001 00000 000 00000 0001011 */ -#define THEAD_inval_A0 ".long 0x0265000b" -#define THEAD_clean_A0 ".long 0x0255000b" -#define THEAD_flush_A0 ".long 0x0275000b" +#define THEAD_INVAL_A0 ".long 0x0265000b" +#define THEAD_CLEAN_A0 ".long 0x0255000b" +#define THEAD_FLUSH_A0 ".long 0x0275000b" #define THEAD_SYNC_S ".long 0x0190000b" #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h index 6960beb75f32..e27179b26086 100644 --- a/arch/riscv/include/asm/insn-def.h +++ b/arch/riscv/include/asm/insn-def.h @@ -180,19 +180,19 @@ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \ __RD(0), RS1(gaddr), RS2(vmid)) -#define CBO_inval(base) \ +#define CBO_INVAL(base) \ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(0)) -#define CBO_clean(base) \ +#define CBO_CLEAN(base) \ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(1)) -#define CBO_flush(base) \ +#define CBO_FLUSH(base) \ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(2)) -#define CBO_zero(base) \ +#define CBO_ZERO(base) \ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \ RS1(base), SIMM12(4)) diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S index d7a256eb53f4..b22de1231144 100644 --- a/arch/riscv/lib/clear_page.S +++ b/arch/riscv/lib/clear_page.S @@ -29,41 +29,41 @@ SYM_FUNC_START(clear_page) lw a1, riscv_cboz_block_size add a2, a0, a2 .Lzero_loop: - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 CBOZ_ALT(11, "bltu a0, a2, .Lzero_loop; ret", "nop; nop") - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 CBOZ_ALT(10, "bltu a0, a2, .Lzero_loop; ret", "nop; nop") - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 CBOZ_ALT(9, "bltu a0, a2, .Lzero_loop; ret", "nop; nop") - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 CBOZ_ALT(8, "bltu a0, a2, .Lzero_loop; ret", "nop; nop") - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 - CBO_zero(a0) + CBO_ZERO(a0) add a0, a0, a1 bltu a0, a2, .Lzero_loop ret diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index b76e7e192eb1..607d5f47d437 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -31,7 +31,7 @@ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size) return; } #endif - ALT_CMO_OP(clean, 
vaddr, size, riscv_cbom_block_size); + ALT_CMO_OP(CLEAN, vaddr, size, riscv_cbom_block_size); } static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size) @@ -45,7 +45,7 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size) } #endif - ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size); + ALT_CMO_OP(INVAL, vaddr, size, riscv_cbom_block_size); } static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size) @@ -59,7 +59,7 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size) } #endif - ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size); + ALT_CMO_OP(FLUSH, vaddr, size, riscv_cbom_block_size); } static inline bool arch_sync_dma_clean_before_fromdevice(void) @@ -131,7 +131,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) } #endif - ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size); + ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size); } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c index c5fc5ec96f6d..370a422ede11 100644 --- a/arch/riscv/mm/pmem.c +++ b/arch/riscv/mm/pmem.c @@ -17,7 +17,7 @@ void arch_wb_cache_pmem(void *addr, size_t size) return; } #endif - ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size); + ALT_CMO_OP(CLEAN, addr, size, riscv_cbom_block_size); } EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); @@ -29,6 +29,6 @@ void arch_invalidate_pmem(void *addr, size_t size) return; } #endif - ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size); + ALT_CMO_OP(INVAL, addr, size, riscv_cbom_block_size); } EXPORT_SYMBOL_GPL(arch_invalidate_pmem); From d5d2c264d33b9acf1a0216861942176ee3569258 Mon Sep 17 00:00:00 2001 From: Yu Chien Peter Lin Date: Thu, 21 Sep 2023 10:50:20 +0800 Subject: [PATCH 13/51] riscv: Improve PTDUMP to show RSW with non-zero value RSW field can be used to encode 2 bits of software defined information. Currently, PTDUMP only prints "RSW" when its value is 1 or 3. To fix this issue and improve the debugging experience with PTDUMP, we redefine _PAGE_SPECIAL to its original value and use _PAGE_SOFT as the RSW mask, allow it to print the RSW with any non-zero value. This patch also removes the val from the struct prot_bits as it is no longer needed. 
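As an illustration of the resulting format (reasoning from the val >> 8 conversion in the patch, not captured output): an entry with only software bit 8 set is now dumped as RSW(1), bit 9 alone as RSW(2), and both bits as RSW(3), while entries with no software bits print " .. ". Previously only a set bit 8 matched the mask/val pair and produced a bare "RSW", so bit 9 was never visible.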
Signed-off-by: Yu Chien Peter Lin Reviewed-by: Alexandre Ghiti Tested-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230921025022.3989723-2-peterlin@andestech.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable-bits.h | 4 ++-- arch/riscv/mm/ptdump.c | 33 ++++++++++++--------------- 2 files changed, 16 insertions(+), 21 deletions(-) diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h index f896708e8331..179bd4afece4 100644 --- a/arch/riscv/include/asm/pgtable-bits.h +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -16,9 +16,9 @@ #define _PAGE_GLOBAL (1 << 5) /* Global */ #define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */ #define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */ -#define _PAGE_SOFT (1 << 8) /* Reserved for software */ +#define _PAGE_SOFT (3 << 8) /* Reserved for software */ -#define _PAGE_SPECIAL _PAGE_SOFT +#define _PAGE_SPECIAL (1 << 8) /* RSW: 0x1 */ #define _PAGE_TABLE _PAGE_PRESENT /* diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 20a9f991a6d7..57a0926c6627 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -129,7 +129,6 @@ static struct ptd_mm_info efi_ptd_info = { /* Page Table Entry */ struct prot_bits { u64 mask; - u64 val; const char *set; const char *clear; }; @@ -137,47 +136,38 @@ struct prot_bits { static const struct prot_bits pte_bits[] = { { .mask = _PAGE_SOFT, - .val = _PAGE_SOFT, - .set = "RSW", - .clear = " ", + .set = "RSW(%d)", + .clear = " .. ", }, { .mask = _PAGE_DIRTY, - .val = _PAGE_DIRTY, .set = "D", .clear = ".", }, { .mask = _PAGE_ACCESSED, - .val = _PAGE_ACCESSED, .set = "A", .clear = ".", }, { .mask = _PAGE_GLOBAL, - .val = _PAGE_GLOBAL, .set = "G", .clear = ".", }, { .mask = _PAGE_USER, - .val = _PAGE_USER, .set = "U", .clear = ".", }, { .mask = _PAGE_EXEC, - .val = _PAGE_EXEC, .set = "X", .clear = ".", }, { .mask = _PAGE_WRITE, - .val = _PAGE_WRITE, .set = "W", .clear = ".", }, { .mask = _PAGE_READ, - .val = _PAGE_READ, .set = "R", .clear = ".", }, { .mask = _PAGE_PRESENT, - .val = _PAGE_PRESENT, .set = "V", .clear = ".", } @@ -208,15 +198,20 @@ static void dump_prot(struct pg_state *st) unsigned int i; for (i = 0; i < ARRAY_SIZE(pte_bits); i++) { - const char *s; + char s[7]; + unsigned long val; - if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val) - s = pte_bits[i].set; - else - s = pte_bits[i].clear; + val = st->current_prot & pte_bits[i].mask; + if (val) { + if (pte_bits[i].mask == _PAGE_SOFT) + sprintf(s, pte_bits[i].set, val >> 8); + else + sprintf(s, "%s", pte_bits[i].set); + } else { + sprintf(s, "%s", pte_bits[i].clear); + } - if (s) - pt_dump_seq_printf(st->seq, " %s", s); + pt_dump_seq_printf(st->seq, " %s", s); } } From 0713ff337173884bcaf163e44cadd6a56bc8193a Mon Sep 17 00:00:00 2001 From: Yu Chien Peter Lin Date: Thu, 21 Sep 2023 10:50:21 +0800 Subject: [PATCH 14/51] riscv: Introduce PBMT field to PTDUMP This patch introduces the PBMT field to the PTDUMP, so it can display the memory attributes for NC or IO. 
Signed-off-by: Yu Chien Peter Lin Reviewed-by: Alexandre Ghiti Tested-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230921025022.3989723-3-peterlin@andestech.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/ptdump.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 57a0926c6627..13997cf3fe36 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -135,6 +135,12 @@ struct prot_bits { static const struct prot_bits pte_bits[] = { { +#ifdef CONFIG_64BIT + .mask = _PAGE_MTMASK_SVPBMT, + .set = "MT(%s)", + .clear = " .. ", + }, { +#endif .mask = _PAGE_SOFT, .set = "RSW(%d)", .clear = " .. ", @@ -205,6 +211,16 @@ static void dump_prot(struct pg_state *st) if (val) { if (pte_bits[i].mask == _PAGE_SOFT) sprintf(s, pte_bits[i].set, val >> 8); +#ifdef CONFIG_64BIT + else if (pte_bits[i].mask == _PAGE_MTMASK_SVPBMT) { + if (val == _PAGE_NOCACHE_SVPBMT) + sprintf(s, pte_bits[i].set, "NC"); + else if (val == _PAGE_IO_SVPBMT) + sprintf(s, pte_bits[i].set, "IO"); + else + sprintf(s, pte_bits[i].set, "??"); + } +#endif else sprintf(s, "%s", pte_bits[i].set); } else { From 015c3c370472290e37143d1387c000db7ec273ad Mon Sep 17 00:00:00 2001 From: Yu Chien Peter Lin Date: Thu, 21 Sep 2023 10:50:22 +0800 Subject: [PATCH 15/51] riscv: Introduce NAPOT field to PTDUMP This patch introduces the NAPOT field to PTDUMP, allowing it to display the letter "N" for pages that have the 63rd bit set. Signed-off-by: Yu Chien Peter Lin Reviewed-by: Alexandre Ghiti Tested-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20230921025022.3989723-4-peterlin@andestech.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/ptdump.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c index 13997cf3fe36..b71f08b91e53 100644 --- a/arch/riscv/mm/ptdump.c +++ b/arch/riscv/mm/ptdump.c @@ -136,6 +136,10 @@ struct prot_bits { static const struct prot_bits pte_bits[] = { { #ifdef CONFIG_64BIT + .mask = _PAGE_NAPOT, + .set = "N", + .clear = ".", + }, { .mask = _PAGE_MTMASK_SVPBMT, .set = "MT(%s)", .clear = " .. ", From b701f9e726f0a30a94ea6af596b74c1f07b95b6b Mon Sep 17 00:00:00 2001 From: Nam Cao Date: Tue, 29 Aug 2023 10:36:15 +0200 Subject: [PATCH 16/51] riscv: provide riscv-specific is_trap_insn() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit uprobes expects is_trap_insn() to return true for any trap instructions, not just the one used for installing uprobe. The current default implementation only returns true for 16-bit c.ebreak if C extension is enabled. This can confuse uprobes if a 32-bit ebreak generates a trap exception from userspace: uprobes asks is_trap_insn() who says there is no trap, so uprobes assume a probe was there before but has been removed, and return to the trap instruction. This causes an infinite loop of entering and exiting trap handler. Instead of using the default implementation, implement this function speficially for riscv with checks for both ebreak and c.ebreak. 
Fixes: 74784081aac8 ("riscv: Add uprobes supported") Signed-off-by: Nam Cao Tested-by: Björn Töpel Reviewed-by: Guo Ren Link: https://lore.kernel.org/r/20230829083614.117748-1-namcaov@gmail.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/probes/uprobes.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/riscv/kernel/probes/uprobes.c b/arch/riscv/kernel/probes/uprobes.c index 194f166b2cc4..4b3dc8beaf77 100644 --- a/arch/riscv/kernel/probes/uprobes.c +++ b/arch/riscv/kernel/probes/uprobes.c @@ -3,6 +3,7 @@ #include #include #include +#include #include "decode-insn.h" @@ -17,6 +18,11 @@ bool is_swbp_insn(uprobe_opcode_t *insn) #endif } +bool is_trap_insn(uprobe_opcode_t *insn) +{ + return riscv_insn_is_ebreak(*insn) || riscv_insn_is_c_ebreak(*insn); +} + unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) { return instruction_pointer(regs); From 8cb22bec142624d21bc85ff96b7bad10b6220e6a Mon Sep 17 00:00:00 2001 From: Nam Cao Date: Tue, 29 Aug 2023 20:25:00 +0200 Subject: [PATCH 17/51] riscv: kprobes: allow writing to x0 Instructions can write to x0, so we should simulate these instructions normally. Currently, the kernel hangs if an instruction who writes to x0 is simulated. Fixes: c22b0bcb1dd0 ("riscv: Add kprobes supported") Cc: stable@vger.kernel.org Signed-off-by: Nam Cao Reviewed-by: Charlie Jenkins Acked-by: Guo Ren Link: https://lore.kernel.org/r/20230829182500.61875-1-namcaov@gmail.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/probes/simulate-insn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/kernel/probes/simulate-insn.c b/arch/riscv/kernel/probes/simulate-insn.c index d3099d67816d..6c166029079c 100644 --- a/arch/riscv/kernel/probes/simulate-insn.c +++ b/arch/riscv/kernel/probes/simulate-insn.c @@ -24,7 +24,7 @@ static inline bool rv_insn_reg_set_val(struct pt_regs *regs, u32 index, unsigned long val) { if (index == 0) - return false; + return true; else if (index <= 31) *((unsigned long *)regs + index) = val; else From b8a03a634129b61a7ec69ece4460297ffbd790dc Mon Sep 17 00:00:00 2001 From: Yunhui Cui Date: Tue, 12 Sep 2023 10:13:49 +0800 Subject: [PATCH 18/51] riscv: add userland instruction dump to RISC-V splats MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add userland instruction dump and rename dump_kernel_instr() to dump_instr(). 
An example: [ 0.822439] Freeing unused kernel image (initmem) memory: 6916K [ 0.823817] Run /init as init process [ 0.839411] init[1]: unhandled signal 4 code 0x1 at 0x000000000005be18 in bb[10000+5fb000] [ 0.840751] CPU: 0 PID: 1 Comm: init Not tainted 5.14.0-rc4-00049-gbd644290aa72-dirty #187 [ 0.841373] Hardware name: , BIOS [ 0.841743] epc : 000000000005be18 ra : 0000000000079e74 sp : 0000003fffcafda0 [ 0.842271] gp : ffffffff816e9dc8 tp : 0000000000000000 t0 : 0000000000000000 [ 0.842947] t1 : 0000003fffc9fdf0 t2 : 0000000000000000 s0 : 0000000000000000 [ 0.843434] s1 : 0000000000000000 a0 : 0000003fffca0190 a1 : 0000003fffcafe18 [ 0.843891] a2 : 0000000000000000 a3 : 0000000000000000 a4 : 0000000000000000 [ 0.844357] a5 : 0000000000000000 a6 : 0000000000000000 a7 : 0000000000000000 [ 0.844803] s2 : 0000000000000000 s3 : 0000000000000000 s4 : 0000000000000000 [ 0.845253] s5 : 0000000000000000 s6 : 0000000000000000 s7 : 0000000000000000 [ 0.845722] s8 : 0000000000000000 s9 : 0000000000000000 s10: 0000000000000000 [ 0.846180] s11: 0000000000d144e0 t3 : 0000000000000000 t4 : 0000000000000000 [ 0.846616] t5 : 0000000000000000 t6 : 0000000000000000 [ 0.847204] status: 0000000200000020 badaddr: 00000000f0028053 cause: 0000000000000002 [ 0.848219] Code: f06f ff5f 3823 fa11 0113 fb01 2e23 0201 0293 0000 (8053) f002 [ 0.851016] Kernel panic - not syncing: Attempted to kill init! exitcode=0x00000004 Signed-off-by: Yunhui Cui Reviewed-by: Björn Töpel Link: https://lore.kernel.org/r/20230912021349.28302-1-cuiyunhui@bytedance.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/traps.c | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index 8895c8197519..141036d06015 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -34,7 +34,21 @@ int show_unhandled_signals = 1; static DEFINE_SPINLOCK(die_lock); -static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs) +static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns) +{ + const void __user *uaddr = (__force const void __user *)insns; + + if (!user_mode(regs)) + return get_kernel_nofault(*val, insns); + + /* The user space code from other tasks cannot be accessed. */ + if (regs != task_pt_regs(current)) + return -EPERM; + + return copy_from_user_nofault(val, uaddr, sizeof(*val)); +} + +static void dump_instr(const char *loglvl, struct pt_regs *regs) { char str[sizeof("0000 ") * 12 + 2 + 1], *p = str; const u16 *insns = (u16 *)instruction_pointer(regs); @@ -43,7 +57,7 @@ static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs) int i; for (i = -10; i < 2; i++) { - bad = get_kernel_nofault(val, &insns[i]); + bad = copy_code(regs, &val, &insns[i]); if (!bad) { p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val); } else { @@ -72,7 +86,7 @@ void die(struct pt_regs *regs, const char *str) print_modules(); if (regs) { show_regs(regs); - dump_kernel_instr(KERN_EMERG, regs); + dump_instr(KERN_EMERG, regs); } cause = regs ? 
regs->cause : -1; @@ -105,6 +119,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); __show_regs(regs); + dump_instr(KERN_EMERG, regs); } force_sig_fault(signo, code, (void __user *)addr); From ddcc7d9bf531b2e950bc4a745a41c825a4759ae6 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 12 Sep 2023 15:20:13 +0800 Subject: [PATCH 19/51] riscv: vdso.lds.S: drop __alt_start and __alt_end symbols These two symbols are not used, remove them. Signed-off-by: Jisheng Zhang Tested-by: Emil Renner Berthing Link: https://lore.kernel.org/r/20230912072015.2424-2-jszhang@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/vdso.lds.S | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index 82ce64900f3d..d43fd7c7dd11 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -42,9 +42,7 @@ SECTIONS . = ALIGN(4); .alternative : { - __alt_start = .; *(.alternative) - __alt_end = .; } .data : { From 49cfbdc21faf5fffbdaa8fd31e1451a4432cfdaa Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 12 Sep 2023 15:20:14 +0800 Subject: [PATCH 20/51] riscv: vdso.lds.S: merge .data section into .rodata section The .data section doesn't need to be separate from .rodata section, they are both readonly. Signed-off-by: Jisheng Zhang Tested-by: Emil Renner Berthing Link: https://lore.kernel.org/r/20230912072015.2424-3-jszhang@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/vdso.lds.S | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index d43fd7c7dd11..671aa21769bc 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -29,7 +29,13 @@ SECTIONS .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr .eh_frame : { KEEP (*(.eh_frame)) } :text - .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) } + .rodata : { + *(.rodata .rodata.* .gnu.linkonce.r.*) + *(.got.plt) *(.got) + *(.data .data.* .gnu.linkonce.d.*) + *(.dynbss) + *(.bss .bss.* .gnu.linkonce.b.*) + } /* * This linker script is used both with -r and with -shared. @@ -44,13 +50,6 @@ SECTIONS .alternative : { *(.alternative) } - - .data : { - *(.got.plt) *(.got) - *(.data .data.* .gnu.linkonce.d.*) - *(.dynbss) - *(.bss .bss.* .gnu.linkonce.b.*) - } } /* From 8f8c1ff879fab60f80f3a7aec3000f47e5b03ba9 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 12 Sep 2023 15:20:15 +0800 Subject: [PATCH 21/51] riscv: vdso.lds.S: remove hardcoded 0x800 .text start addr I believe the hardcoded 0x800 and related comments come from the long history VDSO_TEXT_OFFSET in x86 vdso code, but commit 5b9304933730 ("x86 vDSO: generate vdso-syms.lds") and commit f6b46ebf904f ("x86 vDSO: new layout") removes the comment and hard coding for x86. Similar as x86 and other arch, riscv doesn't need the rigid layout using VDSO_TEXT_OFFSET since it "no longer matters to the kernel". so we could remove the hard coding now, and removing it brings a small vdso.so and aligns with other architectures. Also, having enough separation between data and text is important for I-cache, so similar as x86, move .note, .eh_frame_hdr, and .eh_frame between .rodata and .text. 
Signed-off-by: Jisheng Zhang Reviewed-by: Andrew Jones Tested-by: Emil Renner Berthing Link: https://lore.kernel.org/r/20230912072015.2424-4-jszhang@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/vdso/vdso.lds.S | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S index 671aa21769bc..cbe2a179331d 100644 --- a/arch/riscv/kernel/vdso/vdso.lds.S +++ b/arch/riscv/kernel/vdso/vdso.lds.S @@ -23,12 +23,8 @@ SECTIONS .gnu.version_d : { *(.gnu.version_d) } .gnu.version_r : { *(.gnu.version_r) } - .note : { *(.note.*) } :text :note .dynamic : { *(.dynamic) } :text :dynamic - .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr - .eh_frame : { KEEP (*(.eh_frame)) } :text - .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) *(.got.plt) *(.got) @@ -37,13 +33,16 @@ SECTIONS *(.bss .bss.* .gnu.linkonce.b.*) } + .note : { *(.note.*) } :text :note + + .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr + .eh_frame : { KEEP (*(.eh_frame)) } :text + /* - * This linker script is used both with -r and with -shared. - * For the layouts to match, we need to skip more than enough - * space for the dynamic symbol table, etc. If this amount is - * insufficient, ld -shared will error; simply increase it here. + * Text is well-separated from actual data: there's plenty of + * stuff that isn't used at runtime in between. */ - . = 0x800; + . = ALIGN(16); .text : { *(.text .text.*) } :text . = ALIGN(4); From dbfbda3bd6bfb5189e05b9eab8dfaad2d1d23f62 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Tue, 12 Sep 2023 15:25:10 +0800 Subject: [PATCH 22/51] riscv: mm: update T-Head memory type definitions Update T-Head memory type definitions according to C910 doc [1] For NC and IO, SH property isn't configurable, hardcoded as SH, so set SH for NOCACHE and IO. And also set bit[61](Bufferable) for NOCACHE according to the table 6.1 in the doc [1]. 
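As a cross-check of the encoding described above (illustrative only, not part of the patch), the updated macro values in the hunk below decompose into the C910 attribute bits as follows:

  /*
   * bits [63:59] = SO C B SH Sec
   *   _PAGE_PMA_THEAD     = (1UL << 62) | (1UL << 61) | (1UL << 60)  -> 01110 (C, B, SH)
   *   _PAGE_NOCACHE_THEAD = (1UL << 61) | (1UL << 60)                -> 00110 (B, SH)
   *   _PAGE_IO_THEAD      = (1UL << 63) | (1UL << 60)                -> 10010 (SO, SH)
   */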
Link: https://github.com/T-head-Semi/openc910 [1] Signed-off-by: Jisheng Zhang Reviewed-by: Guo Ren Tested-by: Drew Fustini Link: https://lore.kernel.org/r/20230912072510.2510-1-jszhang@kernel.org Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/pgtable-64.h | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index 7a5097202e15..9a2c780a11e9 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -126,14 +126,18 @@ enum napot_cont_order { /* * [63:59] T-Head Memory Type definitions: - * - * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + * bit[63] SO - Strong Order + * bit[62] C - Cacheable + * bit[61] B - Bufferable + * bit[60] SH - Shareable + * bit[59] Sec - Trustable + * 00110 - NC Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable - * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + * 10010 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable */ #define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60)) -#define _PAGE_NOCACHE_THEAD 0UL -#define _PAGE_IO_THEAD (1UL << 63) +#define _PAGE_NOCACHE_THEAD ((1UL << 61) | (1UL << 60)) +#define _PAGE_IO_THEAD ((1UL << 63) | (1UL << 60)) #define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59)) static inline u64 riscv_page_mtmask(void) From c5e9b2c2ae82231d85d9650854e7b3e97dde33da Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:25 +0100 Subject: [PATCH 23/51] riscv: Improve tlb_flush() For now, tlb_flush() simply calls flush_tlb_mm() which results in a flush of the whole TLB. So let's use mmu_gather fields to provide a more fine-grained flush of the TLB.
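For reference, the mmu_gather fields used by the new tlb_flush() come from include/asm-generic/tlb.h; a rough sketch of their meaning (not part of the patch):

  /*
   *   tlb->fullmm          - the whole address space is being torn down
   *                          (exit/execve), so a full flush is the cheapest option
   *   tlb->need_flush_all  - a caller explicitly asked for a full flush
   *   tlb->start, tlb->end - virtual range accumulated while unmapping
   *   tlb_get_unmap_size() - stride derived from the page sizes seen while
   *                          gathering, used as the flush stride below
   */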
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Samuel Holland Tested-by: Lad Prabhakar # On RZ/Five SMARC Link: https://lore.kernel.org/r/20231030133027.19542-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlb.h | 8 +++++++- arch/riscv/include/asm/tlbflush.h | 3 +++ arch/riscv/mm/tlbflush.c | 7 +++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index 120bcf2ed8a8..1eb5682b2af6 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb) { - flush_tlb_mm(tlb->mm); +#ifdef CONFIG_MMU + if (tlb->fullmm || tlb->need_flush_all) + flush_tlb_mm(tlb->mm); + else + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, + tlb_get_unmap_size(tlb)); +#endif } #endif /* _ASM_RISCV_TLB_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index a09196f8de68..f5c4fb0ae642 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr) #if defined(CONFIG_SMP) && defined(CONFIG_MMU) void flush_tlb_all(void); void flush_tlb_mm(struct mm_struct *mm); +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned int page_size); void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, } #define flush_tlb_mm(mm) flush_tlb_all() +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() #endif /* !CONFIG_SMP || !CONFIG_MMU */ /* Flush a range of kernel pages */ diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 77be59aadc73..fa03289853d8 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm) __flush_tlb_range(mm, 0, -1, PAGE_SIZE); } +void flush_tlb_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end, + unsigned int page_size) +{ + __flush_tlb_range(mm, start, end - start, page_size); +} + void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE); From c962a6e7463980a513e990a7a8a9967e529ad467 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:26 +0100 Subject: [PATCH 24/51] riscv: Improve flush_tlb_range() for hugetlb pages flush_tlb_range() uses a fixed stride of PAGE_SIZE and in its current form, when a hugetlb mapping needs to be flushed, flush_tlb_range() flushes the whole tlb: so set a stride of the size of the hugetlb mapping in order to only flush the hugetlb mapping. However, if the hugepage is a NAPOT region, all PTEs that constitute this mapping must be invalidated, so the stride size must actually be the size of the PTE. Note that THPs are directly handled by flush_pmd_tlb_range(). 
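A few illustrative cases for the stride selection implemented below (assuming a 4 KiB base page size and RV64 defaults for PMD_SIZE):

  /*
   *   2 MiB hugetlb mapping, no Svnapot: stride = 2 MiB                    -> one sfence.vma
   *   2 MiB hugetlb mapping, Svnapot:    2 MiB >= PMD_SIZE, stride = PMD_SIZE -> one sfence.vma
   *   64 KiB NAPOT mapping,  Svnapot:    64 KiB < PMD_SIZE, stride = PAGE_SIZE -> all 16
   *                                      constituent 4 KiB PTEs are invalidated
   */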
Signed-off-by: Alexandre Ghiti Reviewed-by: Samuel Holland Tested-by: Lad Prabhakar # On RZ/Five SMARC Link: https://lore.kernel.org/r/20231030133027.19542-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/tlbflush.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index fa03289853d8..b6d712a82306 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -147,7 +148,33 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE); + unsigned long stride_size; + + if (!is_vm_hugetlb_page(vma)) { + stride_size = PAGE_SIZE; + } else { + stride_size = huge_page_size(hstate_vma(vma)); + + /* + * As stated in the privileged specification, every PTE in a + * NAPOT region must be invalidated, so reset the stride in that + * case. + */ + if (has_svnapot()) { + if (stride_size >= PGDIR_SIZE) + stride_size = PGDIR_SIZE; + else if (stride_size >= P4D_SIZE) + stride_size = P4D_SIZE; + else if (stride_size >= PUD_SIZE) + stride_size = PUD_SIZE; + else if (stride_size >= PMD_SIZE) + stride_size = PMD_SIZE; + else + stride_size = PAGE_SIZE; + } + } + + __flush_tlb_range(vma->vm_mm, start, end - start, stride_size); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, From 9d4e8d5fa7dbbb606b355f40d918a1feef821bc5 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:27 +0100 Subject: [PATCH 25/51] riscv: Make __flush_tlb_range() loop over pte instead of flushing the whole tlb Currently, when the range to flush covers more than one page (a 4K page or a hugepage), __flush_tlb_range() flushes the whole tlb. Flushing the whole tlb comes with a greater cost than flushing a single entry so we should flush single entries up to a certain threshold so that: threshold * cost of flushing a single entry < cost of flushing the whole tlb. 
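With the 64-entry threshold introduced below, the trade-off works out roughly as follows (illustrative, assuming a 4 KiB stride):

  /*
   *   flush of 128 KiB: DIV_ROUND_UP(128K, 4K) = 32  <= 64 -> 32 page-sized sfence.vma
   *   flush of   1 MiB: DIV_ROUND_UP(1M, 4K)   = 256 >  64 -> one full-TLB flush
   */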
Co-developed-by: Mayuresh Chitale Signed-off-by: Mayuresh Chitale Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Tested-by: Lad Prabhakar # On RZ/Five SMARC Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20231030133027.19542-4-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/sbi.h | 3 - arch/riscv/include/asm/tlbflush.h | 3 + arch/riscv/kernel/sbi.c | 32 +++------ arch/riscv/mm/tlbflush.c | 113 +++++++++++++++--------------- 4 files changed, 71 insertions(+), 80 deletions(-) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 5b4a1bf5f439..b79d0228144f 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -273,9 +273,6 @@ void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_send_ipi(unsigned int cpu); int sbi_remote_fence_i(const struct cpumask *cpu_mask); -int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, - unsigned long start, - unsigned long size); int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long start, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index f5c4fb0ae642..170a49c531c6 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -11,6 +11,9 @@ #include #include +#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1) +#define FLUSH_TLB_NO_ASID ((unsigned long)-1) + #ifdef CONFIG_MMU extern unsigned long asid_mask; diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index c672c8ba9a2a..5a62ed1da453 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -11,6 +11,7 @@ #include #include #include +#include /* default SBI version is 0.1 */ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; @@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask) } EXPORT_SYMBOL(sbi_remote_fence_i); -/** - * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote - * harts for the specified virtual address range. - * @cpu_mask: A cpu mask containing all the target harts. - * @start: Start of the virtual address - * @size: Total size of the virtual address range. - * - * Return: 0 on success, appropriate linux error code otherwise. - */ -int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, - unsigned long start, - unsigned long size) -{ - return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, - cpu_mask, start, size, 0, 0); -} -EXPORT_SYMBOL(sbi_remote_sfence_vma); - /** * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given - * remote harts for a virtual address range belonging to a specific ASID. + * remote harts for a virtual address range belonging to a specific ASID or not. * * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the virtual address * @size: Total size of the virtual address range. - * @asid: The value of address space identifier (ASID). + * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID + * for flushing all address spaces. * * Return: 0 on success, appropriate linux error code otherwise. 
*/ @@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long size, unsigned long asid) { - return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, - cpu_mask, start, size, asid, 0); + if (asid == FLUSH_TLB_NO_ASID) + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, + cpu_mask, start, size, 0, 0); + else + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, + cpu_mask, start, size, asid, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma_asid); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index b6d712a82306..e46fefc70927 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -9,28 +9,50 @@ static inline void local_flush_tlb_all_asid(unsigned long asid) { - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); + if (asid != FLUSH_TLB_NO_ASID) + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); + else + local_flush_tlb_all(); } static inline void local_flush_tlb_page_asid(unsigned long addr, unsigned long asid) { - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); + if (asid != FLUSH_TLB_NO_ASID) + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); + else + local_flush_tlb_page(addr); } -static inline void local_flush_tlb_range(unsigned long start, - unsigned long size, unsigned long stride) +/* + * Flush entire TLB if number of entries to be flushed is greater + * than the threshold below. + */ +static unsigned long tlb_flush_all_threshold __read_mostly = 64; + +static void local_flush_tlb_range_threshold_asid(unsigned long start, + unsigned long size, + unsigned long stride, + unsigned long asid) { - if (size <= stride) - local_flush_tlb_page(start); - else - local_flush_tlb_all(); + unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride); + int i; + + if (nr_ptes_in_range > tlb_flush_all_threshold) { + local_flush_tlb_all_asid(asid); + return; + } + + for (i = 0; i < nr_ptes_in_range; ++i) { + local_flush_tlb_page_asid(start, asid); + start += stride; + } } static inline void local_flush_tlb_range_asid(unsigned long start, @@ -38,8 +60,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start, { if (size <= stride) local_flush_tlb_page_asid(start, asid); - else + else if (size == FLUSH_TLB_MAX_SIZE) local_flush_tlb_all_asid(asid); + else + local_flush_tlb_range_threshold_asid(start, size, stride, asid); } static void __ipi_flush_tlb_all(void *info) @@ -52,7 +76,7 @@ void flush_tlb_all(void) if (riscv_use_ipi_for_rfence()) on_each_cpu(__ipi_flush_tlb_all, NULL, 1); else - sbi_remote_sfence_vma(NULL, 0, -1); + sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID); } struct flush_tlb_range_data { @@ -69,18 +93,12 @@ static void __ipi_flush_tlb_range_asid(void *info) local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); } -static void __ipi_flush_tlb_range(void *info) -{ - struct flush_tlb_range_data *d = info; - - local_flush_tlb_range(d->start, d->size, d->stride); -} - static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { struct flush_tlb_range_data ftd; struct cpumask *cmask = mm_cpumask(mm); + unsigned long asid = FLUSH_TLB_NO_ASID; unsigned int cpuid; bool broadcast; @@ -90,39 +108,24 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, cpuid = get_cpu(); /* check if the tlbflush needs to be sent to other CPUs */ broadcast = cpumask_any_but(cmask, 
cpuid) < nr_cpu_ids; - if (static_branch_unlikely(&use_asid_allocator)) { - unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask; - if (broadcast) { - if (riscv_use_ipi_for_rfence()) { - ftd.asid = asid; - ftd.start = start; - ftd.size = size; - ftd.stride = stride; - on_each_cpu_mask(cmask, - __ipi_flush_tlb_range_asid, - &ftd, 1); - } else - sbi_remote_sfence_vma_asid(cmask, - start, size, asid); - } else { - local_flush_tlb_range_asid(start, size, stride, asid); - } + if (static_branch_unlikely(&use_asid_allocator)) + asid = atomic_long_read(&mm->context.id) & asid_mask; + + if (broadcast) { + if (riscv_use_ipi_for_rfence()) { + ftd.asid = asid; + ftd.start = start; + ftd.size = size; + ftd.stride = stride; + on_each_cpu_mask(cmask, + __ipi_flush_tlb_range_asid, + &ftd, 1); + } else + sbi_remote_sfence_vma_asid(cmask, + start, size, asid); } else { - if (broadcast) { - if (riscv_use_ipi_for_rfence()) { - ftd.asid = 0; - ftd.start = start; - ftd.size = size; - ftd.stride = stride; - on_each_cpu_mask(cmask, - __ipi_flush_tlb_range, - &ftd, 1); - } else - sbi_remote_sfence_vma(cmask, start, size); - } else { - local_flush_tlb_range(start, size, stride); - } + local_flush_tlb_range_asid(start, size, stride, asid); } put_cpu(); @@ -130,7 +133,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, void flush_tlb_mm(struct mm_struct *mm) { - __flush_tlb_range(mm, 0, -1, PAGE_SIZE); + __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); } void flush_tlb_mm_range(struct mm_struct *mm, From 5e22bfd520ea8740e9a20314d2a890baf304c9d2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:28 +0100 Subject: [PATCH 26/51] riscv: Improve flush_tlb_kernel_range() This function used to simply flush the whole tlb of all harts, be more subtile and try to only flush the range. The problem is that we can only use PAGE_SIZE as stride since we don't know the size of the underlying mapping and then this function will be improved only if the size of the region to flush is < threshold * PAGE_SIZE. 
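Concretely, with the 64-entry threshold and a PAGE_SIZE stride this means (illustrative, assuming 4 KiB pages):

  /*
   *   flush_tlb_kernel_range() over 256 KiB or less (e.g. a small vunmap())
   *       -> at most 64 page-sized sfence.vma, no full flush
   *   anything larger
   *       -> still a full-TLB flush, as before
   */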
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Tested-by: Lad Prabhakar # On RZ/Five SMARC Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20231030133027.19542-5-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlbflush.h | 11 +++++----- arch/riscv/mm/tlbflush.c | 34 ++++++++++++++++++++++--------- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 170a49c531c6..8f3418c5f172 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -40,6 +40,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +void flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, @@ -56,15 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, local_flush_tlb_all(); } -#define flush_tlb_mm(mm) flush_tlb_all() -#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() -#endif /* !CONFIG_SMP || !CONFIG_MMU */ - /* Flush a range of kernel pages */ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - flush_tlb_all(); + local_flush_tlb_all(); } +#define flush_tlb_mm(mm) flush_tlb_all() +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() +#endif /* !CONFIG_SMP || !CONFIG_MMU */ + #endif /* _ASM_RISCV_TLBFLUSH_H */ diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index e46fefc70927..e6659d7368b3 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -97,20 +97,27 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { struct flush_tlb_range_data ftd; - struct cpumask *cmask = mm_cpumask(mm); + const struct cpumask *cmask; unsigned long asid = FLUSH_TLB_NO_ASID; - unsigned int cpuid; bool broadcast; - if (cpumask_empty(cmask)) - return; + if (mm) { + unsigned int cpuid; - cpuid = get_cpu(); - /* check if the tlbflush needs to be sent to other CPUs */ - broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; + cmask = mm_cpumask(mm); + if (cpumask_empty(cmask)) + return; - if (static_branch_unlikely(&use_asid_allocator)) - asid = atomic_long_read(&mm->context.id) & asid_mask; + cpuid = get_cpu(); + /* check if the tlbflush needs to be sent to other CPUs */ + broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; + + if (static_branch_unlikely(&use_asid_allocator)) + asid = atomic_long_read(&mm->context.id) & asid_mask; + } else { + cmask = cpu_online_mask; + broadcast = true; + } if (broadcast) { if (riscv_use_ipi_for_rfence()) { @@ -128,7 +135,8 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, local_flush_tlb_range_asid(start, size, stride, asid); } - put_cpu(); + if (mm) + put_cpu(); } void flush_tlb_mm(struct mm_struct *mm) @@ -179,6 +187,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, __flush_tlb_range(vma->vm_mm, start, end - start, stride_size); } + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + __flush_tlb_range(NULL, start, end - start, PAGE_SIZE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE void flush_pmd_tlb_range(struct vm_area_struct 
*vma, unsigned long start, unsigned long end) From 57a4542cb7c9baa1509c3366b57a08d75b212ead Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 24 Oct 2023 16:53:18 +0200 Subject: [PATCH 27/51] riscv: boot: Fix creation of loader.bin MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When flashing loader.bin for K210 using kflash:     [ERROR] This is an ELF file and cannot be programmed to flash directly: arch/riscv/boot/loader.bin Before, loader.bin relied on "OBJCOPYFLAGS := -O binary" in the main RISC-V Makefile to create a boot image with the right format. With this removed, the image is now created in the wrong (ELF) format. Fix this by adding an explicit rule. Fixes: 505b02957e74f0c5 ("riscv: Remove duplicate objcopy flag") Signed-off-by: Geert Uytterhoeven Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/1086025809583809538dfecaa899892218f44e7e.1698159066.git.geert+renesas@glider.be Signed-off-by: Palmer Dabbelt --- arch/riscv/boot/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile index 22b13947bd13..8e7fc0edf21d 100644 --- a/arch/riscv/boot/Makefile +++ b/arch/riscv/boot/Makefile @@ -17,6 +17,7 @@ KCOV_INSTRUMENT := n OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S +OBJCOPYFLAGS_loader.bin :=-O binary OBJCOPYFLAGS_xipImage :=-O binary -R .note -R .note.gnu.build-id -R .comment -S targets := Image Image.* loader loader.o loader.lds loader.bin From b18f7296fbfdb2ad0871f00f3042fc74663d52ac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Tue, 24 Oct 2023 15:26:51 +0200 Subject: [PATCH 28/51] riscv: use ".L" local labels in assembly when applicable MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For the sake of coherency, use local labels in assembly when applicable. This also avoid kprobes being confused when applying a kprobe since the size of function is computed by checking where the next visible symbol is located. This might end up in computing some function size to be way shorter than expected and thus failing to apply kprobes to the specified offset. Signed-off-by: Clément Léger Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20231024132655.730417-2-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/entry.S | 6 ++--- arch/riscv/kernel/head.S | 18 ++++++------- arch/riscv/kernel/mcount.S | 10 +++---- arch/riscv/lib/memmove.S | 54 +++++++++++++++++++------------------- 4 files changed, 44 insertions(+), 44 deletions(-) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 9f92c067f7e1..e48478eb6f2d 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -26,9 +26,9 @@ SYM_CODE_START(handle_exception) * register will contain 0, and we should continue on the current TP. 
*/ csrrw tp, CSR_SCRATCH, tp - bnez tp, _save_context + bnez tp, .Lsave_context -_restore_kernel_tpsp: +.Lrestore_kernel_tpsp: csrr tp, CSR_SCRATCH REG_S sp, TASK_TI_KERNEL_SP(tp) @@ -40,7 +40,7 @@ _restore_kernel_tpsp: REG_L sp, TASK_TI_KERNEL_SP(tp) #endif -_save_context: +.Lsave_context: REG_S sp, TASK_TI_USER_SP(tp) REG_L sp, TASK_TI_KERNEL_SP(tp) addi sp, sp, -(PT_SIZE_ON_STACK) diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 18f97ec0f7ed..0c0432eb57d7 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -164,12 +164,12 @@ secondary_start_sbi: XIP_FIXUP_OFFSET a0 call relocate_enable_mmu #endif - call setup_trap_vector + call .Lsetup_trap_vector tail smp_callin #endif /* CONFIG_SMP */ .align 2 -setup_trap_vector: +.Lsetup_trap_vector: /* Set trap vector to exception handler */ la a0, handle_exception csrw CSR_TVEC, a0 @@ -206,7 +206,7 @@ ENTRY(_start_kernel) * not implement PMPs, so we set up a quick trap handler to just skip * touching the PMPs on any trap. */ - la a0, pmp_done + la a0, .Lpmp_done csrw CSR_TVEC, a0 li a0, -1 @@ -214,7 +214,7 @@ ENTRY(_start_kernel) li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X) csrw CSR_PMPCFG0, a0 .align 2 -pmp_done: +.Lpmp_done: /* * The hartid in a0 is expected later on, and we have no firmware @@ -275,12 +275,12 @@ pmp_done: /* Clear BSS for flat non-ELF images */ la a3, __bss_start la a4, __bss_stop - ble a4, a3, clear_bss_done -clear_bss: + ble a4, a3, .Lclear_bss_done +.Lclear_bss: REG_S zero, (a3) add a3, a3, RISCV_SZPTR - blt a3, a4, clear_bss -clear_bss_done: + blt a3, a4, .Lclear_bss +.Lclear_bss_done: #endif la a2, boot_cpu_hartid XIP_FIXUP_OFFSET a2 @@ -305,7 +305,7 @@ clear_bss_done: call relocate_enable_mmu #endif /* CONFIG_MMU */ - call setup_trap_vector + call .Lsetup_trap_vector /* Restore C environment */ la tp, init_task la sp, init_thread_union + THREAD_SIZE diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index 8818a8fa9ff3..ab4dd0594fe7 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -85,16 +85,16 @@ ENTRY(MCOUNT_NAME) #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return REG_L t1, 0(t0) - bne t1, t4, do_ftrace_graph_caller + bne t1, t4, .Ldo_ftrace_graph_caller la t3, ftrace_graph_entry REG_L t2, 0(t3) la t6, ftrace_graph_entry_stub - bne t2, t6, do_ftrace_graph_caller + bne t2, t6, .Ldo_ftrace_graph_caller #endif la t3, ftrace_trace_function REG_L t5, 0(t3) - bne t5, t4, do_trace + bne t5, t4, .Ldo_trace ret #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME) * A pseudo representation for the function graph tracer: * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller) */ -do_ftrace_graph_caller: +.Ldo_ftrace_graph_caller: addi a0, s0, -SZREG mv a1, ra #ifdef HAVE_FUNCTION_GRAPH_FP_TEST @@ -118,7 +118,7 @@ do_ftrace_graph_caller: * A pseudo representation for the function tracer: * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller) */ -do_trace: +.Ldo_trace: REG_L a1, -SZREG(s0) mv a0, ra diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S index 838ff2022fe3..1930b388c3a0 100644 --- a/arch/riscv/lib/memmove.S +++ b/arch/riscv/lib/memmove.S @@ -26,8 +26,8 @@ SYM_FUNC_START_WEAK(memmove) */ /* Return if nothing to do */ - beq a0, a1, return_from_memmove - beqz a2, return_from_memmove + beq a0, a1, .Lreturn_from_memmove + beqz a2, .Lreturn_from_memmove /* * Register Uses @@ -60,7 +60,7 @@ SYM_FUNC_START_WEAK(memmove) * small enough not to bother. 
*/ andi t0, a2, -(2 * SZREG) - beqz t0, byte_copy + beqz t0, .Lbyte_copy /* * Now solve for t5 and t6. @@ -87,14 +87,14 @@ SYM_FUNC_START_WEAK(memmove) */ xor t0, a0, a1 andi t1, t0, (SZREG - 1) - beqz t1, coaligned_copy + beqz t1, .Lcoaligned_copy /* Fall through to misaligned fixup copy */ -misaligned_fixup_copy: - bltu a1, a0, misaligned_fixup_copy_reverse +.Lmisaligned_fixup_copy: + bltu a1, a0, .Lmisaligned_fixup_copy_reverse -misaligned_fixup_copy_forward: - jal t0, byte_copy_until_aligned_forward +.Lmisaligned_fixup_copy_forward: + jal t0, .Lbyte_copy_until_aligned_forward andi a5, a1, (SZREG - 1) /* Find the alignment offset of src (a1) */ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ @@ -153,10 +153,10 @@ misaligned_fixup_copy_forward: mv t3, t6 /* Fix the dest pointer in case the loop was broken */ add a1, t3, a5 /* Restore the src pointer */ - j byte_copy_forward /* Copy any remaining bytes */ + j .Lbyte_copy_forward /* Copy any remaining bytes */ -misaligned_fixup_copy_reverse: - jal t0, byte_copy_until_aligned_reverse +.Lmisaligned_fixup_copy_reverse: + jal t0, .Lbyte_copy_until_aligned_reverse andi a5, a4, (SZREG - 1) /* Find the alignment offset of src (a4) */ slli a6, a5, 3 /* Multiply by 8 to convert that to bits to shift */ @@ -215,18 +215,18 @@ misaligned_fixup_copy_reverse: mv t4, t5 /* Fix the dest pointer in case the loop was broken */ add a4, t4, a5 /* Restore the src pointer */ - j byte_copy_reverse /* Copy any remaining bytes */ + j .Lbyte_copy_reverse /* Copy any remaining bytes */ /* * Simple copy loops for SZREG co-aligned memory locations. * These also make calls to do byte copies for any unaligned * data at their terminations. */ -coaligned_copy: - bltu a1, a0, coaligned_copy_reverse +.Lcoaligned_copy: + bltu a1, a0, .Lcoaligned_copy_reverse -coaligned_copy_forward: - jal t0, byte_copy_until_aligned_forward +.Lcoaligned_copy_forward: + jal t0, .Lbyte_copy_until_aligned_forward 1: REG_L t1, ( 0 * SZREG)(a1) @@ -235,10 +235,10 @@ coaligned_copy_forward: REG_S t1, (-1 * SZREG)(t3) bne t3, t6, 1b - j byte_copy_forward /* Copy any remaining bytes */ + j .Lbyte_copy_forward /* Copy any remaining bytes */ -coaligned_copy_reverse: - jal t0, byte_copy_until_aligned_reverse +.Lcoaligned_copy_reverse: + jal t0, .Lbyte_copy_until_aligned_reverse 1: REG_L t1, (-1 * SZREG)(a4) @@ -247,7 +247,7 @@ coaligned_copy_reverse: REG_S t1, ( 0 * SZREG)(t4) bne t4, t5, 1b - j byte_copy_reverse /* Copy any remaining bytes */ + j .Lbyte_copy_reverse /* Copy any remaining bytes */ /* * These are basically sub-functions within the function. They @@ -258,7 +258,7 @@ coaligned_copy_reverse: * up from where they were left and we avoid code duplication * without any overhead except the call in and return jumps. */ -byte_copy_until_aligned_forward: +.Lbyte_copy_until_aligned_forward: beq t3, t5, 2f 1: lb t1, 0(a1) @@ -269,7 +269,7 @@ byte_copy_until_aligned_forward: 2: jalr zero, 0x0(t0) /* Return to multibyte copy loop */ -byte_copy_until_aligned_reverse: +.Lbyte_copy_until_aligned_reverse: beq t4, t6, 2f 1: lb t1, -1(a4) @@ -285,10 +285,10 @@ byte_copy_until_aligned_reverse: * These will byte copy until they reach the end of data to copy. * At that point, they will call to return from memmove. 
*/ -byte_copy: - bltu a1, a0, byte_copy_reverse +.Lbyte_copy: + bltu a1, a0, .Lbyte_copy_reverse -byte_copy_forward: +.Lbyte_copy_forward: beq t3, t4, 2f 1: lb t1, 0(a1) @@ -299,7 +299,7 @@ byte_copy_forward: 2: ret -byte_copy_reverse: +.Lbyte_copy_reverse: beq t4, t3, 2f 1: lb t1, -1(a4) @@ -309,7 +309,7 @@ byte_copy_reverse: bne t4, t3, 1b 2: -return_from_memmove: +.Lreturn_from_memmove: ret SYM_FUNC_END(memmove) From 76329c693924d8f37afbf361f0d8daab594e1644 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Tue, 24 Oct 2023 15:26:52 +0200 Subject: [PATCH 29/51] riscv: Use SYM_*() assembly macros instead of deprecated ones MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ENTRY()/END()/WEAK() macros are deprecated and we should make use of the new SYM_*() macros [1] for better annotation of symbols. Replace the deprecated ones with the new ones and fix wrong usage of END()/ENDPROC() to correctly describe the symbols. [1] https://docs.kernel.org/core-api/asm-annotations.html Signed-off-by: Clément Léger Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20231024132655.730417-3-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/copy-unaligned.S | 8 ++++---- arch/riscv/kernel/fpu.S | 8 ++++---- arch/riscv/kernel/head.S | 12 +++++------ arch/riscv/kernel/hibernate-asm.S | 12 +++++------ arch/riscv/kernel/mcount-dyn.S | 20 ++++++++----------- arch/riscv/kernel/mcount.S | 8 ++++---- arch/riscv/kernel/probes/rethook_trampoline.S | 4 ++-- arch/riscv/kernel/suspend_entry.S | 4 ++-- arch/riscv/kernel/vdso/flush_icache.S | 4 ++-- arch/riscv/kernel/vdso/getcpu.S | 4 ++-- arch/riscv/kernel/vdso/rt_sigreturn.S | 4 ++-- arch/riscv/kernel/vdso/sys_hwprobe.S | 4 ++-- arch/riscv/lib/memcpy.S | 6 +++--- arch/riscv/lib/memmove.S | 3 +-- arch/riscv/lib/memset.S | 6 +++--- arch/riscv/lib/uaccess.S | 11 +++++----- arch/riscv/purgatory/entry.S | 16 ++++----------- 17 files changed, 60 insertions(+), 74 deletions(-) diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S index cfdecfbaad62..2b3d9398c113 100644 --- a/arch/riscv/kernel/copy-unaligned.S +++ b/arch/riscv/kernel/copy-unaligned.S @@ -9,7 +9,7 @@ /* void __riscv_copy_words_unaligned(void *, const void *, size_t) */ /* Performs a memcpy without aligning buffers, using word loads and stores. */ /* Note: The size is truncated to a multiple of 8 * SZREG */ -ENTRY(__riscv_copy_words_unaligned) +SYM_FUNC_START(__riscv_copy_words_unaligned) andi a4, a2, ~((8*SZREG)-1) beqz a4, 2f add a3, a1, a4 @@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned) 2: ret -END(__riscv_copy_words_unaligned) +SYM_FUNC_END(__riscv_copy_words_unaligned) /* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */ /* Performs a memcpy without aligning buffers, using only byte accesses. 
*/ /* Note: The size is truncated to a multiple of 8 */ -ENTRY(__riscv_copy_bytes_unaligned) +SYM_FUNC_START(__riscv_copy_bytes_unaligned) andi a4, a2, ~(8-1) beqz a4, 2f add a3, a1, a4 @@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned) 2: ret -END(__riscv_copy_bytes_unaligned) +SYM_FUNC_END(__riscv_copy_bytes_unaligned) diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S index 5dd3161a4dac..2c543f130f93 100644 --- a/arch/riscv/kernel/fpu.S +++ b/arch/riscv/kernel/fpu.S @@ -19,7 +19,7 @@ #include #include -ENTRY(__fstate_save) +SYM_FUNC_START(__fstate_save) li a2, TASK_THREAD_F0 add a0, a0, a2 li t1, SR_FS @@ -60,9 +60,9 @@ ENTRY(__fstate_save) sw t0, TASK_THREAD_FCSR_F0(a0) csrc CSR_STATUS, t1 ret -ENDPROC(__fstate_save) +SYM_FUNC_END(__fstate_save) -ENTRY(__fstate_restore) +SYM_FUNC_START(__fstate_restore) li a2, TASK_THREAD_F0 add a0, a0, a2 li t1, SR_FS @@ -103,7 +103,7 @@ ENTRY(__fstate_restore) fscsr t0 csrc CSR_STATUS, t1 ret -ENDPROC(__fstate_restore) +SYM_FUNC_END(__fstate_restore) #define get_f32(which) fmv.x.s a0, which; j 2f #define put_f32(which) fmv.s.x which, a1; j 2f diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 0c0432eb57d7..b77397432403 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -19,7 +19,7 @@ #include "efi-header.S" __HEAD -ENTRY(_start) +SYM_CODE_START(_start) /* * Image header expected by Linux boot-loaders. The image header data * structure is described in asm/image.h. @@ -187,9 +187,9 @@ secondary_start_sbi: wfi j .Lsecondary_park -END(_start) +SYM_CODE_END(_start) -ENTRY(_start_kernel) +SYM_CODE_START(_start_kernel) /* Mask all interrupts */ csrw CSR_IE, zero csrw CSR_IP, zero @@ -348,10 +348,10 @@ ENTRY(_start_kernel) tail .Lsecondary_start_common #endif /* CONFIG_RISCV_BOOT_SPINWAIT */ -END(_start_kernel) +SYM_CODE_END(_start_kernel) #ifdef CONFIG_RISCV_M_MODE -ENTRY(reset_regs) +SYM_CODE_START_LOCAL(reset_regs) li sp, 0 li gp, 0 li tp, 0 @@ -449,5 +449,5 @@ ENTRY(reset_regs) .Lreset_regs_done_vector: #endif /* CONFIG_RISCV_ISA_V */ ret -END(reset_regs) +SYM_CODE_END(reset_regs) #endif /* CONFIG_RISCV_M_MODE */ diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S index d698dd7df637..d040dcf4add4 100644 --- a/arch/riscv/kernel/hibernate-asm.S +++ b/arch/riscv/kernel/hibernate-asm.S @@ -21,7 +21,7 @@ * * Always returns 0 */ -ENTRY(__hibernate_cpu_resume) +SYM_FUNC_START(__hibernate_cpu_resume) /* switch to hibernated image's page table. */ csrw CSR_SATP, s0 sfence.vma @@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume) mv a0, zero ret -END(__hibernate_cpu_resume) +SYM_FUNC_END(__hibernate_cpu_resume) /* * Prepare to restore the image. @@ -42,7 +42,7 @@ END(__hibernate_cpu_resume) * a1: satp of temporary page tables. * a2: cpu_resume. */ -ENTRY(hibernate_restore_image) +SYM_FUNC_START(hibernate_restore_image) mv s0, a0 mv s1, a1 mv s2, a2 @@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image) REG_L a1, relocated_restore_code jr a1 -END(hibernate_restore_image) +SYM_FUNC_END(hibernate_restore_image) /* * The below code will be executed from a 'safe' page. @@ -58,7 +58,7 @@ END(hibernate_restore_image) * back to the original memory location. Finally, it jumps to __hibernate_cpu_resume() * to restore the CPU context. */ -ENTRY(hibernate_core_restore_code) +SYM_FUNC_START(hibernate_core_restore_code) /* switch to temp page table. 
*/ csrw satp, s1 sfence.vma @@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code) bnez s4, .Lcopy jr s2 -END(hibernate_core_restore_code) +SYM_FUNC_END(hibernate_core_restore_code) diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S index 669b8697aa38..58dd96a2a153 100644 --- a/arch/riscv/kernel/mcount-dyn.S +++ b/arch/riscv/kernel/mcount-dyn.S @@ -82,7 +82,7 @@ .endm #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ -ENTRY(ftrace_caller) +SYM_FUNC_START(ftrace_caller) SAVE_ABI addi a0, t0, -FENTRY_RA_OFFSET @@ -91,8 +91,7 @@ ENTRY(ftrace_caller) mv a1, ra mv a3, sp -ftrace_call: - .global ftrace_call +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) call ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -102,16 +101,15 @@ ftrace_call: #ifdef HAVE_FUNCTION_GRAPH_FP_TEST mv a2, s0 #endif -ftrace_graph_call: - .global ftrace_graph_call +SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) call ftrace_stub #endif RESTORE_ABI jr t0 -ENDPROC(ftrace_caller) +SYM_FUNC_END(ftrace_caller) #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS -ENTRY(ftrace_regs_caller) +SYM_FUNC_START(ftrace_regs_caller) SAVE_ALL addi a0, t0, -FENTRY_RA_OFFSET @@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller) mv a1, ra mv a3, sp -ftrace_regs_call: - .global ftrace_regs_call +SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -131,12 +128,11 @@ ftrace_regs_call: #ifdef HAVE_FUNCTION_GRAPH_FP_TEST mv a2, s0 #endif -ftrace_graph_regs_call: - .global ftrace_graph_regs_call +SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL) call ftrace_stub #endif RESTORE_ALL jr t0 -ENDPROC(ftrace_regs_caller) +SYM_FUNC_END(ftrace_regs_caller) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index ab4dd0594fe7..b4dd9ed6849e 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph) ret SYM_FUNC_END(ftrace_stub_graph) -ENTRY(return_to_handler) +SYM_FUNC_START(return_to_handler) /* * On implementing the frame point test, the ideal way is to compare the * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return. 
@@ -76,11 +76,11 @@ ENTRY(return_to_handler) mv a2, a0 RESTORE_RET_ABI_STATE jalr a2 -ENDPROC(return_to_handler) +SYM_FUNC_END(return_to_handler) #endif #ifndef CONFIG_DYNAMIC_FTRACE -ENTRY(MCOUNT_NAME) +SYM_FUNC_START(MCOUNT_NAME) la t4, ftrace_stub #ifdef CONFIG_FUNCTION_GRAPH_TRACER la t0, ftrace_graph_return @@ -126,6 +126,6 @@ ENTRY(MCOUNT_NAME) jalr t5 RESTORE_ABI_STATE ret -ENDPROC(MCOUNT_NAME) +SYM_FUNC_END(MCOUNT_NAME) #endif EXPORT_SYMBOL(MCOUNT_NAME) diff --git a/arch/riscv/kernel/probes/rethook_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S index 21bac92a170a..f2cd83d9b0f0 100644 --- a/arch/riscv/kernel/probes/rethook_trampoline.S +++ b/arch/riscv/kernel/probes/rethook_trampoline.S @@ -75,7 +75,7 @@ REG_L x31, PT_T6(sp) .endm -ENTRY(arch_rethook_trampoline) +SYM_CODE_START(arch_rethook_trampoline) addi sp, sp, -(PT_SIZE_ON_STACK) save_all_base_regs @@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline) addi sp, sp, PT_SIZE_ON_STACK ret -ENDPROC(arch_rethook_trampoline) +SYM_CODE_END(arch_rethook_trampoline) diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S index d5cf8b575777..2d54f309c140 100644 --- a/arch/riscv/kernel/suspend_entry.S +++ b/arch/riscv/kernel/suspend_entry.S @@ -16,7 +16,7 @@ .altmacro .option norelax -ENTRY(__cpu_suspend_enter) +SYM_FUNC_START(__cpu_suspend_enter) /* Save registers (except A0 and T0-T6) */ REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0) REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0) @@ -57,7 +57,7 @@ ENTRY(__cpu_suspend_enter) /* Return to C code */ ret -END(__cpu_suspend_enter) +SYM_FUNC_END(__cpu_suspend_enter) SYM_TYPED_FUNC_START(__cpu_resume_enter) /* Load the global pointer */ diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S index 82f97d67c23e..8f884227e8bc 100644 --- a/arch/riscv/kernel/vdso/flush_icache.S +++ b/arch/riscv/kernel/vdso/flush_icache.S @@ -8,7 +8,7 @@ .text /* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */ -ENTRY(__vdso_flush_icache) +SYM_FUNC_START(__vdso_flush_icache) .cfi_startproc #ifdef CONFIG_SMP li a7, __NR_riscv_flush_icache @@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache) #endif ret .cfi_endproc -ENDPROC(__vdso_flush_icache) +SYM_FUNC_END(__vdso_flush_icache) diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S index bb0c05e2ffba..9c1bd531907f 100644 --- a/arch/riscv/kernel/vdso/getcpu.S +++ b/arch/riscv/kernel/vdso/getcpu.S @@ -8,11 +8,11 @@ .text /* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */ -ENTRY(__vdso_getcpu) +SYM_FUNC_START(__vdso_getcpu) .cfi_startproc /* For now, just do the syscall. 
*/ li a7, __NR_getcpu ecall ret .cfi_endproc -ENDPROC(__vdso_getcpu) +SYM_FUNC_END(__vdso_getcpu) diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S index 10438c7c626a..3dc022aa8931 100644 --- a/arch/riscv/kernel/vdso/rt_sigreturn.S +++ b/arch/riscv/kernel/vdso/rt_sigreturn.S @@ -7,10 +7,10 @@ #include .text -ENTRY(__vdso_rt_sigreturn) +SYM_FUNC_START(__vdso_rt_sigreturn) .cfi_startproc .cfi_signal_frame li a7, __NR_rt_sigreturn ecall .cfi_endproc -ENDPROC(__vdso_rt_sigreturn) +SYM_FUNC_END(__vdso_rt_sigreturn) diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S index 4e704146c77a..77e57f830521 100644 --- a/arch/riscv/kernel/vdso/sys_hwprobe.S +++ b/arch/riscv/kernel/vdso/sys_hwprobe.S @@ -5,11 +5,11 @@ #include .text -ENTRY(riscv_hwprobe) +SYM_FUNC_START(riscv_hwprobe) .cfi_startproc li a7, __NR_riscv_hwprobe ecall ret .cfi_endproc -ENDPROC(riscv_hwprobe) +SYM_FUNC_END(riscv_hwprobe) diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S index 1a40d01a9543..44e009ec5fef 100644 --- a/arch/riscv/lib/memcpy.S +++ b/arch/riscv/lib/memcpy.S @@ -7,8 +7,7 @@ #include /* void *memcpy(void *, const void *, size_t) */ -ENTRY(__memcpy) -WEAK(memcpy) +SYM_FUNC_START(__memcpy) move t6, a0 /* Preserve return value */ /* Defer to byte-oriented copy for small sizes */ @@ -105,6 +104,7 @@ WEAK(memcpy) bltu a1, a3, 5b 6: ret -END(__memcpy) +SYM_FUNC_END(__memcpy) +SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy) SYM_FUNC_ALIAS(__pi_memcpy, __memcpy) SYM_FUNC_ALIAS(__pi___memcpy, __memcpy) diff --git a/arch/riscv/lib/memmove.S b/arch/riscv/lib/memmove.S index 1930b388c3a0..cb3e2e7ef0ba 100644 --- a/arch/riscv/lib/memmove.S +++ b/arch/riscv/lib/memmove.S @@ -7,7 +7,6 @@ #include SYM_FUNC_START(__memmove) -SYM_FUNC_START_WEAK(memmove) /* * Returns * a0 - dest @@ -312,7 +311,7 @@ SYM_FUNC_START_WEAK(memmove) .Lreturn_from_memmove: ret -SYM_FUNC_END(memmove) SYM_FUNC_END(__memmove) +SYM_FUNC_ALIAS_WEAK(memmove, __memmove) SYM_FUNC_ALIAS(__pi_memmove, __memmove) SYM_FUNC_ALIAS(__pi___memmove, __memmove) diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S index 34c5360c6705..35f358e70bdb 100644 --- a/arch/riscv/lib/memset.S +++ b/arch/riscv/lib/memset.S @@ -8,8 +8,7 @@ #include /* void *memset(void *, int, size_t) */ -ENTRY(__memset) -WEAK(memset) +SYM_FUNC_START(__memset) move t0, a0 /* Preserve return value */ /* Defer to byte-oriented fill for small sizes */ @@ -110,4 +109,5 @@ WEAK(memset) bltu t0, a3, 5b 6: ret -END(__memset) +SYM_FUNC_END(__memset) +SYM_FUNC_ALIAS_WEAK(memset, __memset) diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 09b47ebacf2e..3ab438f30d13 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -10,8 +10,7 @@ _asm_extable 100b, \lbl .endm -ENTRY(__asm_copy_to_user) -ENTRY(__asm_copy_from_user) +SYM_FUNC_START(__asm_copy_to_user) /* Enable access to user memory */ li t6, SR_SUM @@ -181,13 +180,13 @@ ENTRY(__asm_copy_from_user) csrc CSR_STATUS, t6 sub a0, t5, a0 ret -ENDPROC(__asm_copy_to_user) -ENDPROC(__asm_copy_from_user) +SYM_FUNC_END(__asm_copy_to_user) EXPORT_SYMBOL(__asm_copy_to_user) +SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user) EXPORT_SYMBOL(__asm_copy_from_user) -ENTRY(__clear_user) +SYM_FUNC_START(__clear_user) /* Enable access to user memory */ li t6, SR_SUM @@ -233,5 +232,5 @@ ENTRY(__clear_user) csrc CSR_STATUS, t6 sub a0, a3, a0 ret -ENDPROC(__clear_user) +SYM_FUNC_END(__clear_user) EXPORT_SYMBOL(__clear_user) diff --git 
a/arch/riscv/purgatory/entry.S b/arch/riscv/purgatory/entry.S index 0194f4554130..5bcf3af903da 100644 --- a/arch/riscv/purgatory/entry.S +++ b/arch/riscv/purgatory/entry.S @@ -7,15 +7,11 @@ * Author: Li Zhengyu (lizhengyu3@huawei.com) * */ - -.macro size, sym:req - .size \sym, . - \sym -.endm +#include .text -.globl purgatory_start -purgatory_start: +SYM_CODE_START(purgatory_start) lla sp, .Lstack mv s0, a0 /* The hartid of the current hart */ @@ -28,8 +24,7 @@ purgatory_start: mv a1, s1 ld a2, riscv_kernel_entry jr a2 - -size purgatory_start +SYM_CODE_END(purgatory_start) .align 4 .rept 256 @@ -39,9 +34,6 @@ size purgatory_start .data -.globl riscv_kernel_entry -riscv_kernel_entry: - .quad 0 -size riscv_kernel_entry +SYM_DATA(riscv_kernel_entry, .quad 0) .end From 4cc0d8a3f109fbdd8100ed88fc9417203a5d5b4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20L=C3=A9ger?= Date: Tue, 24 Oct 2023 15:26:53 +0200 Subject: [PATCH 30/51] riscv: kernel: Use correct SYM_DATA_*() macro for data MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some data were incorrectly annotated with SYM_FUNC_*() instead of SYM_DATA_*() ones. Use the correct ones. Signed-off-by: Clément Léger Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20231024132655.730417-4-cleger@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/entry.S | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index e48478eb6f2d..54ca4564a926 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -322,7 +322,7 @@ SYM_FUNC_END(__switch_to) .section ".rodata" .align LGREG /* Exception vector table */ -SYM_CODE_START(excp_vect_table) +SYM_DATA_START_LOCAL(excp_vect_table) RISCV_PTR do_trap_insn_misaligned ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault) RISCV_PTR do_trap_insn_illegal @@ -340,12 +340,11 @@ SYM_CODE_START(excp_vect_table) RISCV_PTR do_page_fault /* load page fault */ RISCV_PTR do_trap_unknown RISCV_PTR do_page_fault /* store page fault */ -excp_vect_table_end: -SYM_CODE_END(excp_vect_table) +SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end) #ifndef CONFIG_MMU -SYM_CODE_START(__user_rt_sigreturn) +SYM_DATA_START(__user_rt_sigreturn) li a7, __NR_rt_sigreturn ecall -SYM_CODE_END(__user_rt_sigreturn) +SYM_DATA_END(__user_rt_sigreturn) #endif From e0c0a7c35f67191152635e5913f76aa7094d967c Mon Sep 17 00:00:00 2001 From: Andreas Schwab Date: Tue, 31 Oct 2023 12:40:47 +0100 Subject: [PATCH 31/51] riscv: select ARCH_PROC_KCORE_TEXT This adds a separate segment for kernel text in /proc/kcore, which has a different address than the direct linear map. Signed-off-by: Andreas Schwab Link: https://lore.kernel.org/r/mvmh6m758ao.fsf@suse.de Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 3f77530180b9..5b1e61aca6cf 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -916,6 +916,9 @@ config PORTABLE select MMU select OF +config ARCH_PROC_KCORE_TEXT + def_bool y + menu "Power management options" source "kernel/power/Kconfig" From 114d5c85a39a0e35024ff5156a160eef1907f3e6 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:25 +0100 Subject: [PATCH 32/51] riscv: Improve tlb_flush() For now, tlb_flush() simply calls flush_tlb_mm() which results in a flush of the whole TLB. So let's use mmu_gather fields to provide a more fine-grained flush of the TLB. 
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Reviewed-by: Samuel Holland Tested-by: Lad Prabhakar # On RZ/Five SMARC Link: https://lore.kernel.org/r/20231030133027.19542-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlb.h | 8 +++++++- arch/riscv/include/asm/tlbflush.h | 3 +++ arch/riscv/mm/tlbflush.c | 7 +++++++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h index 120bcf2ed8a8..1eb5682b2af6 100644 --- a/arch/riscv/include/asm/tlb.h +++ b/arch/riscv/include/asm/tlb.h @@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb); static inline void tlb_flush(struct mmu_gather *tlb) { - flush_tlb_mm(tlb->mm); +#ifdef CONFIG_MMU + if (tlb->fullmm || tlb->need_flush_all) + flush_tlb_mm(tlb->mm); + else + flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, + tlb_get_unmap_size(tlb)); +#endif } #endif /* _ASM_RISCV_TLB_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index a09196f8de68..f5c4fb0ae642 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -32,6 +32,8 @@ static inline void local_flush_tlb_page(unsigned long addr) #if defined(CONFIG_SMP) && defined(CONFIG_MMU) void flush_tlb_all(void); void flush_tlb_mm(struct mm_struct *mm); +void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, + unsigned long end, unsigned int page_size); void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); @@ -52,6 +54,7 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, } #define flush_tlb_mm(mm) flush_tlb_all() +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() #endif /* !CONFIG_SMP || !CONFIG_MMU */ /* Flush a range of kernel pages */ diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index 77be59aadc73..fa03289853d8 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -132,6 +132,13 @@ void flush_tlb_mm(struct mm_struct *mm) __flush_tlb_range(mm, 0, -1, PAGE_SIZE); } +void flush_tlb_mm_range(struct mm_struct *mm, + unsigned long start, unsigned long end, + unsigned int page_size) +{ + __flush_tlb_range(mm, start, end - start, page_size); +} + void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) { __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE); From 9e113064b4c291ad06a7a3864691288bd2cf014f Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:26 +0100 Subject: [PATCH 33/51] riscv: Improve flush_tlb_range() for hugetlb pages flush_tlb_range() uses a fixed stride of PAGE_SIZE and in its current form, when a hugetlb mapping needs to be flushed, flush_tlb_range() flushes the whole tlb: so set a stride of the size of the hugetlb mapping in order to only flush the hugetlb mapping. However, if the hugepage is a NAPOT region, all PTEs that constitute this mapping must be invalidated, so the stride size must actually be the size of the PTE. Note that THPs are directly handled by flush_pmd_tlb_range(). 
Signed-off-by: Alexandre Ghiti Reviewed-by: Samuel Holland Tested-by: Lad Prabhakar # On RZ/Five SMARC Link: https://lore.kernel.org/r/20231030133027.19542-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/tlbflush.c | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index fa03289853d8..b6d712a82306 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -3,6 +3,7 @@ #include #include #include +#include #include #include @@ -147,7 +148,33 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr) void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { - __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE); + unsigned long stride_size; + + if (!is_vm_hugetlb_page(vma)) { + stride_size = PAGE_SIZE; + } else { + stride_size = huge_page_size(hstate_vma(vma)); + + /* + * As stated in the privileged specification, every PTE in a + * NAPOT region must be invalidated, so reset the stride in that + * case. + */ + if (has_svnapot()) { + if (stride_size >= PGDIR_SIZE) + stride_size = PGDIR_SIZE; + else if (stride_size >= P4D_SIZE) + stride_size = P4D_SIZE; + else if (stride_size >= PUD_SIZE) + stride_size = PUD_SIZE; + else if (stride_size >= PMD_SIZE) + stride_size = PMD_SIZE; + else + stride_size = PAGE_SIZE; + } + } + + __flush_tlb_range(vma->vm_mm, start, end - start, stride_size); } #ifdef CONFIG_TRANSPARENT_HUGEPAGE void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, From ba6f35964c518b4520bc3f2fe25d8457cb4a7be5 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:27 +0100 Subject: [PATCH 34/51] riscv: Make __flush_tlb_range() loop over pte instead of flushing the whole tlb Currently, when the range to flush covers more than one page (a 4K page or a hugepage), __flush_tlb_range() flushes the whole tlb. Flushing the whole tlb comes with a greater cost than flushing a single entry so we should flush single entries up to a certain threshold so that: threshold * cost of flushing a single entry < cost of flushing the whole tlb. 
Co-developed-by: Mayuresh Chitale Signed-off-by: Mayuresh Chitale Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Tested-by: Lad Prabhakar # On RZ/Five SMARC Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20231030133027.19542-4-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/sbi.h | 3 - arch/riscv/include/asm/tlbflush.h | 3 + arch/riscv/kernel/sbi.c | 32 +++------ arch/riscv/mm/tlbflush.c | 113 +++++++++++++++--------------- 4 files changed, 71 insertions(+), 80 deletions(-) diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 5b4a1bf5f439..b79d0228144f 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -273,9 +273,6 @@ void sbi_set_timer(uint64_t stime_value); void sbi_shutdown(void); void sbi_send_ipi(unsigned int cpu); int sbi_remote_fence_i(const struct cpumask *cpu_mask); -int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, - unsigned long start, - unsigned long size); int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long start, diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index f5c4fb0ae642..170a49c531c6 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -11,6 +11,9 @@ #include #include +#define FLUSH_TLB_MAX_SIZE ((unsigned long)-1) +#define FLUSH_TLB_NO_ASID ((unsigned long)-1) + #ifdef CONFIG_MMU extern unsigned long asid_mask; diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index c672c8ba9a2a..5a62ed1da453 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -11,6 +11,7 @@ #include #include #include +#include /* default SBI version is 0.1 */ unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT; @@ -376,32 +377,15 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask) } EXPORT_SYMBOL(sbi_remote_fence_i); -/** - * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote - * harts for the specified virtual address range. - * @cpu_mask: A cpu mask containing all the target harts. - * @start: Start of the virtual address - * @size: Total size of the virtual address range. - * - * Return: 0 on success, appropriate linux error code otherwise. - */ -int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, - unsigned long start, - unsigned long size) -{ - return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, - cpu_mask, start, size, 0, 0); -} -EXPORT_SYMBOL(sbi_remote_sfence_vma); - /** * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given - * remote harts for a virtual address range belonging to a specific ASID. + * remote harts for a virtual address range belonging to a specific ASID or not. * * @cpu_mask: A cpu mask containing all the target harts. * @start: Start of the virtual address * @size: Total size of the virtual address range. - * @asid: The value of address space identifier (ASID). + * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID + * for flushing all address spaces. * * Return: 0 on success, appropriate linux error code otherwise. 
*/ @@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, unsigned long size, unsigned long asid) { - return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, - cpu_mask, start, size, asid, 0); + if (asid == FLUSH_TLB_NO_ASID) + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, + cpu_mask, start, size, 0, 0); + else + return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, + cpu_mask, start, size, asid, 0); } EXPORT_SYMBOL(sbi_remote_sfence_vma_asid); diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index b6d712a82306..e46fefc70927 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -9,28 +9,50 @@ static inline void local_flush_tlb_all_asid(unsigned long asid) { - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); + if (asid != FLUSH_TLB_NO_ASID) + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); + else + local_flush_tlb_all(); } static inline void local_flush_tlb_page_asid(unsigned long addr, unsigned long asid) { - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); + if (asid != FLUSH_TLB_NO_ASID) + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); + else + local_flush_tlb_page(addr); } -static inline void local_flush_tlb_range(unsigned long start, - unsigned long size, unsigned long stride) +/* + * Flush entire TLB if number of entries to be flushed is greater + * than the threshold below. + */ +static unsigned long tlb_flush_all_threshold __read_mostly = 64; + +static void local_flush_tlb_range_threshold_asid(unsigned long start, + unsigned long size, + unsigned long stride, + unsigned long asid) { - if (size <= stride) - local_flush_tlb_page(start); - else - local_flush_tlb_all(); + unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride); + int i; + + if (nr_ptes_in_range > tlb_flush_all_threshold) { + local_flush_tlb_all_asid(asid); + return; + } + + for (i = 0; i < nr_ptes_in_range; ++i) { + local_flush_tlb_page_asid(start, asid); + start += stride; + } } static inline void local_flush_tlb_range_asid(unsigned long start, @@ -38,8 +60,10 @@ static inline void local_flush_tlb_range_asid(unsigned long start, { if (size <= stride) local_flush_tlb_page_asid(start, asid); - else + else if (size == FLUSH_TLB_MAX_SIZE) local_flush_tlb_all_asid(asid); + else + local_flush_tlb_range_threshold_asid(start, size, stride, asid); } static void __ipi_flush_tlb_all(void *info) @@ -52,7 +76,7 @@ void flush_tlb_all(void) if (riscv_use_ipi_for_rfence()) on_each_cpu(__ipi_flush_tlb_all, NULL, 1); else - sbi_remote_sfence_vma(NULL, 0, -1); + sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID); } struct flush_tlb_range_data { @@ -69,18 +93,12 @@ static void __ipi_flush_tlb_range_asid(void *info) local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid); } -static void __ipi_flush_tlb_range(void *info) -{ - struct flush_tlb_range_data *d = info; - - local_flush_tlb_range(d->start, d->size, d->stride); -} - static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { struct flush_tlb_range_data ftd; struct cpumask *cmask = mm_cpumask(mm); + unsigned long asid = FLUSH_TLB_NO_ASID; unsigned int cpuid; bool broadcast; @@ -90,39 +108,24 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, cpuid = get_cpu(); /* check if the tlbflush needs to be sent to other CPUs */ broadcast = cpumask_any_but(cmask, 
cpuid) < nr_cpu_ids; - if (static_branch_unlikely(&use_asid_allocator)) { - unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask; - if (broadcast) { - if (riscv_use_ipi_for_rfence()) { - ftd.asid = asid; - ftd.start = start; - ftd.size = size; - ftd.stride = stride; - on_each_cpu_mask(cmask, - __ipi_flush_tlb_range_asid, - &ftd, 1); - } else - sbi_remote_sfence_vma_asid(cmask, - start, size, asid); - } else { - local_flush_tlb_range_asid(start, size, stride, asid); - } + if (static_branch_unlikely(&use_asid_allocator)) + asid = atomic_long_read(&mm->context.id) & asid_mask; + + if (broadcast) { + if (riscv_use_ipi_for_rfence()) { + ftd.asid = asid; + ftd.start = start; + ftd.size = size; + ftd.stride = stride; + on_each_cpu_mask(cmask, + __ipi_flush_tlb_range_asid, + &ftd, 1); + } else + sbi_remote_sfence_vma_asid(cmask, + start, size, asid); } else { - if (broadcast) { - if (riscv_use_ipi_for_rfence()) { - ftd.asid = 0; - ftd.start = start; - ftd.size = size; - ftd.stride = stride; - on_each_cpu_mask(cmask, - __ipi_flush_tlb_range, - &ftd, 1); - } else - sbi_remote_sfence_vma(cmask, start, size); - } else { - local_flush_tlb_range(start, size, stride); - } + local_flush_tlb_range_asid(start, size, stride, asid); } put_cpu(); @@ -130,7 +133,7 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, void flush_tlb_mm(struct mm_struct *mm) { - __flush_tlb_range(mm, 0, -1, PAGE_SIZE); + __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE); } void flush_tlb_mm_range(struct mm_struct *mm, From 62b78fd5fe39b5b82e4b4b8d0ba87ad40d1a99bb Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Mon, 30 Oct 2023 14:30:28 +0100 Subject: [PATCH 35/51] riscv: Improve flush_tlb_kernel_range() This function used to simply flush the whole tlb of all harts, be more subtile and try to only flush the range. The problem is that we can only use PAGE_SIZE as stride since we don't know the size of the underlying mapping and then this function will be improved only if the size of the region to flush is < threshold * PAGE_SIZE. 
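With the 64-entry threshold from the previous patch and 4 KiB pages, a kernel-range flush is therefore done page-by-page as long as it covers at most 256 KiB. For a hypothetical 16 KiB region,

        flush_tlb_kernel_range(addr, addr + 4 * PAGE_SIZE);

passes the actual range down (four PAGE_SIZE entries, well under the threshold) instead of flushing the whole TLB of every online hart.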
Signed-off-by: Alexandre Ghiti Reviewed-by: Andrew Jones Tested-by: Lad Prabhakar # On RZ/Five SMARC Reviewed-by: Samuel Holland Tested-by: Samuel Holland Link: https://lore.kernel.org/r/20231030133027.19542-5-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/tlbflush.h | 11 +++++----- arch/riscv/mm/tlbflush.c | 34 ++++++++++++++++++++++--------- 2 files changed, 30 insertions(+), 15 deletions(-) diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 170a49c531c6..8f3418c5f172 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -40,6 +40,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); +void flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifdef CONFIG_TRANSPARENT_HUGEPAGE #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, @@ -56,15 +57,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma, local_flush_tlb_all(); } -#define flush_tlb_mm(mm) flush_tlb_all() -#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() -#endif /* !CONFIG_SMP || !CONFIG_MMU */ - /* Flush a range of kernel pages */ static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) { - flush_tlb_all(); + local_flush_tlb_all(); } +#define flush_tlb_mm(mm) flush_tlb_all() +#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all() +#endif /* !CONFIG_SMP || !CONFIG_MMU */ + #endif /* _ASM_RISCV_TLBFLUSH_H */ diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index e46fefc70927..e6659d7368b3 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -97,20 +97,27 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { struct flush_tlb_range_data ftd; - struct cpumask *cmask = mm_cpumask(mm); + const struct cpumask *cmask; unsigned long asid = FLUSH_TLB_NO_ASID; - unsigned int cpuid; bool broadcast; - if (cpumask_empty(cmask)) - return; + if (mm) { + unsigned int cpuid; - cpuid = get_cpu(); - /* check if the tlbflush needs to be sent to other CPUs */ - broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; + cmask = mm_cpumask(mm); + if (cpumask_empty(cmask)) + return; - if (static_branch_unlikely(&use_asid_allocator)) - asid = atomic_long_read(&mm->context.id) & asid_mask; + cpuid = get_cpu(); + /* check if the tlbflush needs to be sent to other CPUs */ + broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids; + + if (static_branch_unlikely(&use_asid_allocator)) + asid = atomic_long_read(&mm->context.id) & asid_mask; + } else { + cmask = cpu_online_mask; + broadcast = true; + } if (broadcast) { if (riscv_use_ipi_for_rfence()) { @@ -128,7 +135,8 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start, local_flush_tlb_range_asid(start, size, stride, asid); } - put_cpu(); + if (mm) + put_cpu(); } void flush_tlb_mm(struct mm_struct *mm) @@ -179,6 +187,12 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, __flush_tlb_range(vma->vm_mm, start, end - start, stride_size); } + +void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + __flush_tlb_range(NULL, start, end - start, PAGE_SIZE); +} + #ifdef CONFIG_TRANSPARENT_HUGEPAGE void flush_pmd_tlb_range(struct vm_area_struct 
*vma, unsigned long start, unsigned long end) From 946bb33d330251966223f770f64885c79448b1a1 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Sat, 28 Oct 2023 17:51:01 +0200 Subject: [PATCH 36/51] riscv: split cache ops out of dma-noncoherent.c The cache ops are also used by the pmem code which is unconditionally built into the kernel. Move them into a separate file that is built based on the correct config option. Fixes: fd962781270e ("riscv: RISCV_NONSTANDARD_CACHE_OPS shouldn't depend on RISCV_DMA_NONCOHERENT") Reported-by: kernel test robot Signed-off-by: Christoph Hellwig Reviewed-by: Conor Dooley Tested-by: Conor Dooley Reviewed-by: Lad Prabhakar Tested-by: Lad Prabhakar # Link: https://lore.kernel.org/r/20231028155101.1039049-1-hch@lst.de Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/Makefile | 1 + arch/riscv/mm/cache-ops.c | 17 +++++++++++++++++ arch/riscv/mm/dma-noncoherent.c | 15 --------------- 3 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 arch/riscv/mm/cache-ops.c diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 9c454f90fd3d..3a4dfc8babcf 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -36,3 +36,4 @@ endif obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o obj-$(CONFIG_RISCV_DMA_NONCOHERENT) += dma-noncoherent.o +obj-$(CONFIG_RISCV_NONSTANDARD_CACHE_OPS) += cache-ops.o diff --git a/arch/riscv/mm/cache-ops.c b/arch/riscv/mm/cache-ops.c new file mode 100644 index 000000000000..a993ad11d0ec --- /dev/null +++ b/arch/riscv/mm/cache-ops.c @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + */ + +#include + +struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init; + +void +riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops) +{ + if (!ops) + return; + noncoherent_cache_ops = *ops; +} +EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops); diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index 607d5f47d437..4e4e469b8dd6 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -15,12 +15,6 @@ static bool noncoherent_supported __ro_after_init; int dma_cache_alignment __ro_after_init = ARCH_DMA_MINALIGN; EXPORT_SYMBOL_GPL(dma_cache_alignment); -struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = { - .wback = NULL, - .inv = NULL, - .wback_inv = NULL, -}; - static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size) { void *vaddr = phys_to_virt(paddr); @@ -162,12 +156,3 @@ void __init riscv_set_dma_cache_alignment(void) if (!noncoherent_supported) dma_cache_alignment = 1; } - -void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops) -{ - if (!ops) - return; - - noncoherent_cache_ops = *ops; -} -EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops); From 8cbe0accc4a6ba7ed34812a1c7e1ba67e7f7b2a4 Mon Sep 17 00:00:00 2001 From: Emil Renner Berthing Date: Wed, 1 Nov 2023 11:32:59 -0700 Subject: [PATCH 37/51] riscv: Avoid unaligned access when relocating modules With the C-extension regular 32bit instructions are not necessarily aligned on 4-byte boundaries. RISC-V instructions are in fact an ordered list of 16bit little-endian "parcels", so access the instruction as such. This should also make the code work in case someone builds a big-endian RISC-V machine. 
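With RVC enabled a 32-bit instruction may start on any 2-byte boundary, so instead of dereferencing a u32 pointer the relocation handlers now assemble and store instructions through two naturally aligned 16-bit parcels:

        u16 *parcel = location;
        u32 insn = (u32)le16_to_cpu(parcel[0]) |
                   (u32)le16_to_cpu(parcel[1]) << 16;
        /* ... patch the immediate bits ... */
        parcel[0] = cpu_to_le16(insn);
        parcel[1] = cpu_to_le16(insn >> 16);

which also keeps the parcel order correct on a hypothetical big-endian build.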
Signed-off-by: Emil Renner Berthing Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20231101-module_relocations-v9-1-8dfa3483c400@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/module.c | 157 +++++++++++++++++++------------------ 1 file changed, 81 insertions(+), 76 deletions(-) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 7c651d55fcbd..8286d3bb04f4 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -27,68 +27,90 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val) #endif } -static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v) +static int riscv_insn_rmw(void *location, u32 keep, u32 set) +{ + u16 *parcel = location; + u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16; + + insn &= keep; + insn |= set; + + parcel[0] = cpu_to_le16(insn); + parcel[1] = cpu_to_le16(insn >> 16); + return 0; +} + +static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set) +{ + u16 *parcel = location; + u16 insn = le16_to_cpu(*parcel); + + insn &= keep; + insn |= set; + + *parcel = cpu_to_le16(insn); + return 0; +} + +static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v) { if (v != (u32)v) { pr_err("%s: value %016llx out of range for 32-bit field\n", me->name, (long long)v); return -EINVAL; } - *location = v; + *(u32 *)location = v; return 0; } -static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v) +static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v) { *(u64 *)location = v; return 0; } -static int apply_r_riscv_branch_rela(struct module *me, u32 *location, +static int apply_r_riscv_branch_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u32 imm12 = (offset & 0x1000) << (31 - 12); u32 imm11 = (offset & 0x800) >> (11 - 7); u32 imm10_5 = (offset & 0x7e0) << (30 - 10); u32 imm4_1 = (offset & 0x1e) << (11 - 4); - *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1; - return 0; + return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1); } -static int apply_r_riscv_jal_rela(struct module *me, u32 *location, +static int apply_r_riscv_jal_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u32 imm20 = (offset & 0x100000) << (31 - 20); u32 imm19_12 = (offset & 0xff000); u32 imm11 = (offset & 0x800) << (20 - 11); u32 imm10_1 = (offset & 0x7fe) << (30 - 10); - *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1; - return 0; + return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1); } -static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location, +static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u16 imm8 = (offset & 0x100) << (12 - 8); u16 imm7_6 = (offset & 0xc0) >> (6 - 5); u16 imm5 = (offset & 0x20) >> (5 - 2); u16 imm4_3 = (offset & 0x18) << (12 - 5); u16 imm2_1 = (offset & 0x6) << (12 - 10); - *(u16 *)location = (*(u16 *)location & 0xe383) | - imm8 | imm7_6 | imm5 | imm4_3 | imm2_1; - return 0; + return riscv_insn_rvc_rmw(location, 0xe383, + imm8 | imm7_6 | imm5 | imm4_3 | imm2_1); } -static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, +static int 
apply_r_riscv_rvc_jump_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u16 imm11 = (offset & 0x800) << (12 - 11); u16 imm10 = (offset & 0x400) >> (10 - 8); u16 imm9_8 = (offset & 0x300) << (12 - 11); @@ -98,16 +120,14 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location, u16 imm4 = (offset & 0x10) << (12 - 5); u16 imm3_1 = (offset & 0xe) << (12 - 10); - *(u16 *)location = (*(u16 *)location & 0xe003) | - imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1; - return 0; + return riscv_insn_rvc_rmw(location, 0xe003, + imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1); } -static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, +static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; - s32 hi20; + ptrdiff_t offset = (void *)v - location; if (!riscv_insn_valid_32bit_offset(offset)) { pr_err( @@ -116,23 +136,20 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location, return -EINVAL; } - hi20 = (offset + 0x800) & 0xfffff000; - *location = (*location & 0xfff) | hi20; - return 0; + return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000); } -static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location, +static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location, Elf_Addr v) { /* * v is the lo12 value to fill. It is calculated before calling this * handler. */ - *location = (*location & 0xfffff) | ((v & 0xfff) << 20); - return 0; + return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20); } -static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location, +static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location, Elf_Addr v) { /* @@ -142,15 +159,12 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location, u32 imm11_5 = (v & 0xfe0) << (31 - 11); u32 imm4_0 = (v & 0x1f) << (11 - 4); - *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; - return 0; + return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0); } -static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, +static int apply_r_riscv_hi20_rela(struct module *me, void *location, Elf_Addr v) { - s32 hi20; - if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", @@ -158,22 +172,20 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location, return -EINVAL; } - hi20 = ((s32)v + 0x800) & 0xfffff000; - *location = (*location & 0xfff) | hi20; - return 0; + return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000); } -static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location, +static int apply_r_riscv_lo12_i_rela(struct module *me, void *location, Elf_Addr v) { /* Skip medlow checking because of filtering by HI20 already */ s32 hi20 = ((s32)v + 0x800) & 0xfffff000; s32 lo12 = ((s32)v - hi20); - *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20); - return 0; + + return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20); } -static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, +static int apply_r_riscv_lo12_s_rela(struct module *me, void *location, Elf_Addr v) { /* Skip medlow checking because of filtering by HI20 already */ @@ -181,20 +193,18 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location, s32 lo12 = ((s32)v - 
hi20); u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11); u32 imm4_0 = (lo12 & 0x1f) << (11 - 4); - *location = (*location & 0x1fff07f) | imm11_5 | imm4_0; - return 0; + + return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0); } -static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, +static int apply_r_riscv_got_hi20_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; - s32 hi20; + ptrdiff_t offset = (void *)v - location; /* Always emit the got entry */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { - offset = module_emit_got_entry(me, v); - offset = (void *)offset - (void *)location; + offset = (void *)module_emit_got_entry(me, v) - location; } else { pr_err( "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n", @@ -202,22 +212,19 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location, return -EINVAL; } - hi20 = (offset + 0x800) & 0xfffff000; - *location = (*location & 0xfff) | hi20; - return 0; + return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000); } -static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, +static int apply_r_riscv_call_plt_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u32 hi20, lo12; if (!riscv_insn_valid_32bit_offset(offset)) { /* Only emit the plt entry if offset over 32-bit range */ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { - offset = module_emit_plt_entry(me, v); - offset = (void *)offset - (void *)location; + offset = (void *)module_emit_plt_entry(me, v) - location; } else { pr_err( "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", @@ -228,15 +235,14 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location, hi20 = (offset + 0x800) & 0xfffff000; lo12 = (offset - hi20) & 0xfff; - *location = (*location & 0xfff) | hi20; - *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); - return 0; + riscv_insn_rmw(location, 0xfff, hi20); + return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20); } -static int apply_r_riscv_call_rela(struct module *me, u32 *location, +static int apply_r_riscv_call_rela(struct module *me, void *location, Elf_Addr v) { - ptrdiff_t offset = (void *)v - (void *)location; + ptrdiff_t offset = (void *)v - location; u32 hi20, lo12; if (!riscv_insn_valid_32bit_offset(offset)) { @@ -248,18 +254,17 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location, hi20 = (offset + 0x800) & 0xfffff000; lo12 = (offset - hi20) & 0xfff; - *location = (*location & 0xfff) | hi20; - *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20); - return 0; + riscv_insn_rmw(location, 0xfff, hi20); + return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20); } -static int apply_r_riscv_relax_rela(struct module *me, u32 *location, +static int apply_r_riscv_relax_rela(struct module *me, void *location, Elf_Addr v) { return 0; } -static int apply_r_riscv_align_rela(struct module *me, u32 *location, +static int apply_r_riscv_align_rela(struct module *me, void *location, Elf_Addr v) { pr_err( @@ -268,49 +273,49 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location, return -EINVAL; } -static int apply_r_riscv_add16_rela(struct module *me, u32 *location, +static int apply_r_riscv_add16_rela(struct module *me, void *location, Elf_Addr v) { *(u16 *)location += (u16)v; return 0; } -static int apply_r_riscv_add32_rela(struct module *me, u32 *location, +static 
int apply_r_riscv_add32_rela(struct module *me, void *location, Elf_Addr v) { *(u32 *)location += (u32)v; return 0; } -static int apply_r_riscv_add64_rela(struct module *me, u32 *location, +static int apply_r_riscv_add64_rela(struct module *me, void *location, Elf_Addr v) { *(u64 *)location += (u64)v; return 0; } -static int apply_r_riscv_sub16_rela(struct module *me, u32 *location, +static int apply_r_riscv_sub16_rela(struct module *me, void *location, Elf_Addr v) { *(u16 *)location -= (u16)v; return 0; } -static int apply_r_riscv_sub32_rela(struct module *me, u32 *location, +static int apply_r_riscv_sub32_rela(struct module *me, void *location, Elf_Addr v) { *(u32 *)location -= (u32)v; return 0; } -static int apply_r_riscv_sub64_rela(struct module *me, u32 *location, +static int apply_r_riscv_sub64_rela(struct module *me, void *location, Elf_Addr v) { *(u64 *)location -= (u64)v; return 0; } -static int (*reloc_handlers_rela[]) (struct module *me, u32 *location, +static int (*reloc_handlers_rela[]) (struct module *me, void *location, Elf_Addr v) = { [R_RISCV_32] = apply_r_riscv_32_rela, [R_RISCV_64] = apply_r_riscv_64_rela, @@ -342,9 +347,9 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, struct module *me) { Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr; - int (*handler)(struct module *me, u32 *location, Elf_Addr v); + int (*handler)(struct module *me, void *location, Elf_Addr v); Elf_Sym *sym; - u32 *location; + void *location; unsigned int i, type; Elf_Addr v; int res; From 8fd6c5142395a106b63c8668e9f4a7106b6a0772 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Wed, 1 Nov 2023 11:33:00 -0700 Subject: [PATCH 38/51] riscv: Add remaining module relocations Add all final module relocations and add error logs explaining the ones that are not supported. Implement overflow checks for ADD/SUB/SET/ULEB128 relocations. 
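ULEB128 is the only variable-length field: a value is stored 7 bits at a time, least-significant group first, with bit 7 set in every byte except the last. For example the value 300 encodes as two bytes:

        300 & 0x7f = 0x2c, continuation bit set -> 0xac
        300 >> 7   = 0x02, last byte            -> 0x02

which is why SET_ULEB128/SUB_ULEB128 pairs are accumulated in a temporary buffer and only encoded into the section once the final value, and hence the number of bytes, is known.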
Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20231101-module_relocations-v9-2-8dfa3483c400@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/uapi/asm/elf.h | 5 +- arch/riscv/kernel/module.c | 448 ++++++++++++++++++++++++++++-- 2 files changed, 423 insertions(+), 30 deletions(-) diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h index d696d6610231..11a71b8533d5 100644 --- a/arch/riscv/include/uapi/asm/elf.h +++ b/arch/riscv/include/uapi/asm/elf.h @@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_TLS_DTPREL64 9 #define R_RISCV_TLS_TPREL32 10 #define R_RISCV_TLS_TPREL64 11 +#define R_RISCV_IRELATIVE 58 /* Relocation types not used by the dynamic linker */ #define R_RISCV_BRANCH 16 @@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_ALIGN 43 #define R_RISCV_RVC_BRANCH 44 #define R_RISCV_RVC_JUMP 45 -#define R_RISCV_LUI 46 #define R_RISCV_GPREL_I 47 #define R_RISCV_GPREL_S 48 #define R_RISCV_TPREL_I 49 @@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t; #define R_RISCV_SET16 55 #define R_RISCV_SET32 56 #define R_RISCV_32_PCREL 57 +#define R_RISCV_PLT32 59 +#define R_RISCV_SET_ULEB128 60 +#define R_RISCV_SUB_ULEB128 61 #endif /* _UAPI_ASM_RISCV_ELF_H */ diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 8286d3bb04f4..4b339729d5ec 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -7,6 +7,9 @@ #include #include #include +#include +#include +#include #include #include #include @@ -14,6 +17,38 @@ #include #include +struct used_bucket { + struct list_head head; + struct hlist_head *bucket; +}; + +struct relocation_head { + struct hlist_node node; + struct list_head *rel_entry; + void *location; +}; + +struct relocation_entry { + struct list_head head; + Elf_Addr value; + unsigned int type; +}; + +struct relocation_handlers { + int (*reloc_handler)(struct module *me, void *location, Elf_Addr v); + int (*accumulate_handler)(struct module *me, void *location, + long buffer); +}; + +unsigned int initialize_relocation_hashtable(unsigned int num_relocations); +void process_accumulated_relocations(struct module *me); +int add_relocation_to_accumulate(struct module *me, int type, void *location, + unsigned int hashtable_bits, Elf_Addr v); + +struct hlist_head *relocation_hashtable; + +struct list_head used_buckets_list; + /* * The auipc+jalr instruction pair can reach any PC-relative offset * in the range [-2^31 - 2^11, 2^31 - 2^11) @@ -273,6 +308,12 @@ static int apply_r_riscv_align_rela(struct module *me, void *location, return -EINVAL; } +static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v) +{ + *(u8 *)location += (u8)v; + return 0; +} + static int apply_r_riscv_add16_rela(struct module *me, void *location, Elf_Addr v) { @@ -294,6 +335,12 @@ static int apply_r_riscv_add64_rela(struct module *me, void *location, return 0; } +static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v) +{ + *(u8 *)location -= (u8)v; + return 0; +} + static int apply_r_riscv_sub16_rela(struct module *me, void *location, Elf_Addr v) { @@ -315,33 +362,369 @@ static int apply_r_riscv_sub64_rela(struct module *me, void *location, return 0; } -static int (*reloc_handlers_rela[]) (struct module *me, void *location, - Elf_Addr v) = { - [R_RISCV_32] = apply_r_riscv_32_rela, - [R_RISCV_64] = apply_r_riscv_64_rela, - [R_RISCV_BRANCH] = apply_r_riscv_branch_rela, - [R_RISCV_JAL] = 
apply_r_riscv_jal_rela, - [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela, - [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela, - [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela, - [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela, - [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela, - [R_RISCV_HI20] = apply_r_riscv_hi20_rela, - [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela, - [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela, - [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela, - [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela, - [R_RISCV_CALL] = apply_r_riscv_call_rela, - [R_RISCV_RELAX] = apply_r_riscv_relax_rela, - [R_RISCV_ALIGN] = apply_r_riscv_align_rela, - [R_RISCV_ADD16] = apply_r_riscv_add16_rela, - [R_RISCV_ADD32] = apply_r_riscv_add32_rela, - [R_RISCV_ADD64] = apply_r_riscv_add64_rela, - [R_RISCV_SUB16] = apply_r_riscv_sub16_rela, - [R_RISCV_SUB32] = apply_r_riscv_sub32_rela, - [R_RISCV_SUB64] = apply_r_riscv_sub64_rela, +static int dynamic_linking_not_supported(struct module *me, void *location, + Elf_Addr v) +{ + pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n", + me->name, location); + return -EINVAL; +} + +static int tls_not_supported(struct module *me, void *location, Elf_Addr v) +{ + pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n", + me->name, location); + return -EINVAL; +} + +static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v) +{ + u8 *byte = location; + u8 value = v; + + *byte = (*byte - (value & 0x3f)) & 0x3f; + return 0; +} + +static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v) +{ + u8 *byte = location; + u8 value = v; + + *byte = (*byte & 0xc0) | (value & 0x3f); + return 0; +} + +static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v) +{ + *(u8 *)location = (u8)v; + return 0; +} + +static int apply_r_riscv_set16_rela(struct module *me, void *location, + Elf_Addr v) +{ + *(u16 *)location = (u16)v; + return 0; +} + +static int apply_r_riscv_set32_rela(struct module *me, void *location, + Elf_Addr v) +{ + *(u32 *)location = (u32)v; + return 0; +} + +static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location, + Elf_Addr v) +{ + *(u32 *)location = v - (uintptr_t)location; + return 0; +} + +static int apply_r_riscv_plt32_rela(struct module *me, void *location, + Elf_Addr v) +{ + ptrdiff_t offset = (void *)v - location; + + if (!riscv_insn_valid_32bit_offset(offset)) { + /* Only emit the plt entry if offset over 32-bit range */ + if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) { + offset = (void *)module_emit_plt_entry(me, v) - location; + } else { + pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n", + me->name, (long long)v, location); + return -EINVAL; + } + } + + *(u32 *)location = (u32)offset; + return 0; +} + +static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v) +{ + *(long *)location = v; + return 0; +} + +static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v) +{ + *(long *)location -= v; + return 0; +} + +static int apply_6_bit_accumulation(struct module *me, void *location, long buffer) +{ + u8 *byte = location; + u8 value = buffer; + + if (buffer > 0x3f) { + pr_err("%s: value %ld out of range for 6-bit relocation.\n", + me->name, buffer); + return -EINVAL; + } + + *byte = (*byte & 0xc0) | (value & 0x3f); + return 0; +} + +static int apply_8_bit_accumulation(struct module *me, void *location, long buffer) +{ + if 
(buffer > U8_MAX) { + pr_err("%s: value %ld out of range for 8-bit relocation.\n", + me->name, buffer); + return -EINVAL; + } + *(u8 *)location = (u8)buffer; + return 0; +} + +static int apply_16_bit_accumulation(struct module *me, void *location, long buffer) +{ + if (buffer > U16_MAX) { + pr_err("%s: value %ld out of range for 16-bit relocation.\n", + me->name, buffer); + return -EINVAL; + } + *(u16 *)location = (u16)buffer; + return 0; +} + +static int apply_32_bit_accumulation(struct module *me, void *location, long buffer) +{ + if (buffer > U32_MAX) { + pr_err("%s: value %ld out of range for 32-bit relocation.\n", + me->name, buffer); + return -EINVAL; + } + *(u32 *)location = (u32)buffer; + return 0; +} + +static int apply_64_bit_accumulation(struct module *me, void *location, long buffer) +{ + *(u64 *)location = (u64)buffer; + return 0; +} + +static int apply_uleb128_accumulation(struct module *me, void *location, long buffer) +{ + /* + * ULEB128 is a variable length encoding. Encode the buffer into + * the ULEB128 data format. + */ + u8 *p = location; + + while (buffer != 0) { + u8 value = buffer & 0x7f; + + buffer >>= 7; + value |= (!!buffer) << 7; + + *p++ = value; + } + return 0; +} + +/* + * Relocations defined in the riscv-elf-psabi-doc. + * This handles static linking only. + */ +static const struct relocation_handlers reloc_handlers[] = { + [R_RISCV_32] = { apply_r_riscv_32_rela }, + [R_RISCV_64] = { apply_r_riscv_64_rela }, + [R_RISCV_RELATIVE] = { dynamic_linking_not_supported }, + [R_RISCV_COPY] = { dynamic_linking_not_supported }, + [R_RISCV_JUMP_SLOT] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPMOD32] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPMOD64] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPREL32] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPREL64] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_TPREL32] = { dynamic_linking_not_supported }, + [R_RISCV_TLS_TPREL64] = { dynamic_linking_not_supported }, + /* 12-15 undefined */ + [R_RISCV_BRANCH] = { apply_r_riscv_branch_rela }, + [R_RISCV_JAL] = { apply_r_riscv_jal_rela }, + [R_RISCV_CALL] = { apply_r_riscv_call_rela }, + [R_RISCV_CALL_PLT] = { apply_r_riscv_call_plt_rela }, + [R_RISCV_GOT_HI20] = { apply_r_riscv_got_hi20_rela }, + [R_RISCV_TLS_GOT_HI20] = { tls_not_supported }, + [R_RISCV_TLS_GD_HI20] = { tls_not_supported }, + [R_RISCV_PCREL_HI20] = { apply_r_riscv_pcrel_hi20_rela }, + [R_RISCV_PCREL_LO12_I] = { apply_r_riscv_pcrel_lo12_i_rela }, + [R_RISCV_PCREL_LO12_S] = { apply_r_riscv_pcrel_lo12_s_rela }, + [R_RISCV_HI20] = { apply_r_riscv_hi20_rela }, + [R_RISCV_LO12_I] = { apply_r_riscv_lo12_i_rela }, + [R_RISCV_LO12_S] = { apply_r_riscv_lo12_s_rela }, + [R_RISCV_TPREL_HI20] = { tls_not_supported }, + [R_RISCV_TPREL_LO12_I] = { tls_not_supported }, + [R_RISCV_TPREL_LO12_S] = { tls_not_supported }, + [R_RISCV_TPREL_ADD] = { tls_not_supported }, + [R_RISCV_ADD8] = { apply_r_riscv_add8_rela, apply_8_bit_accumulation }, + [R_RISCV_ADD16] = { apply_r_riscv_add16_rela, + apply_16_bit_accumulation }, + [R_RISCV_ADD32] = { apply_r_riscv_add32_rela, + apply_32_bit_accumulation }, + [R_RISCV_ADD64] = { apply_r_riscv_add64_rela, + apply_64_bit_accumulation }, + [R_RISCV_SUB8] = { apply_r_riscv_sub8_rela, apply_8_bit_accumulation }, + [R_RISCV_SUB16] = { apply_r_riscv_sub16_rela, + apply_16_bit_accumulation }, + [R_RISCV_SUB32] = { apply_r_riscv_sub32_rela, + apply_32_bit_accumulation }, + [R_RISCV_SUB64] = { apply_r_riscv_sub64_rela, + apply_64_bit_accumulation }, + /* 41-42 
reserved for future standard use */ + [R_RISCV_ALIGN] = { apply_r_riscv_align_rela }, + [R_RISCV_RVC_BRANCH] = { apply_r_riscv_rvc_branch_rela }, + [R_RISCV_RVC_JUMP] = { apply_r_riscv_rvc_jump_rela }, + /* 46-50 reserved for future standard use */ + [R_RISCV_RELAX] = { apply_r_riscv_relax_rela }, + [R_RISCV_SUB6] = { apply_r_riscv_sub6_rela, apply_6_bit_accumulation }, + [R_RISCV_SET6] = { apply_r_riscv_set6_rela, apply_6_bit_accumulation }, + [R_RISCV_SET8] = { apply_r_riscv_set8_rela, apply_8_bit_accumulation }, + [R_RISCV_SET16] = { apply_r_riscv_set16_rela, + apply_16_bit_accumulation }, + [R_RISCV_SET32] = { apply_r_riscv_set32_rela, + apply_32_bit_accumulation }, + [R_RISCV_32_PCREL] = { apply_r_riscv_32_pcrel_rela }, + [R_RISCV_IRELATIVE] = { dynamic_linking_not_supported }, + [R_RISCV_PLT32] = { apply_r_riscv_plt32_rela }, + [R_RISCV_SET_ULEB128] = { apply_r_riscv_set_uleb128, + apply_uleb128_accumulation }, + [R_RISCV_SUB_ULEB128] = { apply_r_riscv_sub_uleb128, + apply_uleb128_accumulation }, + /* 62-191 reserved for future standard use */ + /* 192-255 nonstandard ABI extensions */ }; +void process_accumulated_relocations(struct module *me) +{ + /* + * Only ADD/SUB/SET/ULEB128 should end up here. + * + * Each bucket may have more than one relocation location. All + * relocations for a location are stored in a list in a bucket. + * + * Relocations are applied to a temp variable before being stored to the + * provided location to check for overflow. This also allows ULEB128 to + * properly decide how many entries are needed before storing to + * location. The final value is stored into location using the handler + * for the last relocation to an address. + * + * Three layers of indexing: + * - Each of the buckets in use + * - Groups of relocations in each bucket by location address + * - Each relocation entry for a location address + */ + struct used_bucket *bucket_iter; + struct relocation_head *rel_head_iter; + struct relocation_entry *rel_entry_iter; + int curr_type; + void *location; + long buffer; + + list_for_each_entry(bucket_iter, &used_buckets_list, head) { + hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) { + buffer = 0; + location = rel_head_iter->location; + list_for_each_entry(rel_entry_iter, + rel_head_iter->rel_entry, head) { + curr_type = rel_entry_iter->type; + reloc_handlers[curr_type].reloc_handler( + me, &buffer, rel_entry_iter->value); + kfree(rel_entry_iter); + } + reloc_handlers[curr_type].accumulate_handler( + me, location, buffer); + kfree(rel_head_iter); + } + kfree(bucket_iter); + } + + kfree(relocation_hashtable); +} + +int add_relocation_to_accumulate(struct module *me, int type, void *location, + unsigned int hashtable_bits, Elf_Addr v) +{ + struct relocation_entry *entry; + struct relocation_head *rel_head; + struct hlist_head *current_head; + struct used_bucket *bucket; + unsigned long hash; + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + INIT_LIST_HEAD(&entry->head); + entry->type = type; + entry->value = v; + + hash = hash_min((uintptr_t)location, hashtable_bits); + + current_head = &relocation_hashtable[hash]; + + /* Find matching location (if any) */ + bool found = false; + struct relocation_head *rel_head_iter; + + hlist_for_each_entry(rel_head_iter, current_head, node) { + if (rel_head_iter->location == location) { + found = true; + rel_head = rel_head_iter; + break; + } + } + + if (!found) { + rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL); + rel_head->rel_entry = + kmalloc(sizeof(struct list_head), GFP_KERNEL); + 
INIT_LIST_HEAD(rel_head->rel_entry); + rel_head->location = location; + INIT_HLIST_NODE(&rel_head->node); + if (!current_head->first) { + bucket = + kmalloc(sizeof(struct used_bucket), GFP_KERNEL); + INIT_LIST_HEAD(&bucket->head); + bucket->bucket = current_head; + list_add(&bucket->head, &used_buckets_list); + } + hlist_add_head(&rel_head->node, current_head); + } + + /* Add relocation to head of discovered rel_head */ + list_add_tail(&entry->head, rel_head->rel_entry); + + return 0; +} + +unsigned int initialize_relocation_hashtable(unsigned int num_relocations) +{ + /* Can safely assume that bits is not greater than sizeof(long) */ + unsigned long hashtable_size = roundup_pow_of_two(num_relocations); + unsigned int hashtable_bits = ilog2(hashtable_size); + + /* + * Double size of hashtable if num_relocations * 1.25 is greater than + * hashtable_size. + */ + int should_double_size = ((num_relocations + (num_relocations >> 2)) > (hashtable_size)); + + hashtable_bits += should_double_size; + + hashtable_size <<= should_double_size; + + relocation_hashtable = kmalloc_array(hashtable_size, + sizeof(*relocation_hashtable), + GFP_KERNEL); + __hash_init(relocation_hashtable, hashtable_size); + + INIT_LIST_HEAD(&used_buckets_list); + + return hashtable_bits; +} + int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) @@ -353,11 +736,13 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int i, type; Elf_Addr v; int res; + unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); + unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations); pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); - for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + for (i = 0; i < num_relocations; i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; @@ -375,8 +760,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, type = ELF_RISCV_R_TYPE(rel[i].r_info); - if (type < ARRAY_SIZE(reloc_handlers_rela)) - handler = reloc_handlers_rela[type]; + if (type < ARRAY_SIZE(reloc_handlers)) + handler = reloc_handlers[type].reloc_handler; else handler = NULL; @@ -432,11 +817,16 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, } } - res = handler(me, location, v); + if (reloc_handlers[type].accumulate_handler) + res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v); + else + res = handler(me, location, v); if (res) return res; } + process_accumulated_relocations(me); + return 0; } From af71bc194916b10f9b394f9b14419d99700a5e67 Mon Sep 17 00:00:00 2001 From: Charlie Jenkins Date: Wed, 1 Nov 2023 11:33:01 -0700 Subject: [PATCH 39/51] riscv: Add tests for riscv module loading Add test cases for the two main groups of relocations added: SUB and SET, along with uleb128. 
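The SUB tests all follow the same pattern: a data word carries a pair of relocations against two labels separated by a 32-byte .space, for example

        sub32:
                .reloc sub32, R_RISCV_ADD32, second
                .reloc sub32, R_RISCV_SUB32, first
                .word 0

and the test function loads the word, subtracts the expected distance of 32 and returns the result, which the KUnit case asserts to be zero. The SET tests likewise compare the relocated value against the suitably truncated address of the label itself.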
Signed-off-by: Charlie Jenkins Link: https://lore.kernel.org/r/20231101-module_relocations-v9-3-8dfa3483c400@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/Kconfig.debug | 1 + arch/riscv/kernel/Makefile | 1 + arch/riscv/kernel/tests/Kconfig.debug | 35 ++++++++ arch/riscv/kernel/tests/Makefile | 1 + arch/riscv/kernel/tests/module_test/Makefile | 15 ++++ .../module_test/test_module_linking_main.c | 88 +++++++++++++++++++ .../kernel/tests/module_test/test_set16.S | 23 +++++ .../kernel/tests/module_test/test_set32.S | 20 +++++ .../kernel/tests/module_test/test_set6.S | 23 +++++ .../kernel/tests/module_test/test_set8.S | 23 +++++ .../kernel/tests/module_test/test_sub16.S | 20 +++++ .../kernel/tests/module_test/test_sub32.S | 20 +++++ .../kernel/tests/module_test/test_sub6.S | 20 +++++ .../kernel/tests/module_test/test_sub64.S | 25 ++++++ .../kernel/tests/module_test/test_sub8.S | 20 +++++ .../kernel/tests/module_test/test_uleb128.S | 31 +++++++ 16 files changed, 366 insertions(+) create mode 100644 arch/riscv/kernel/tests/Kconfig.debug create mode 100644 arch/riscv/kernel/tests/Makefile create mode 100644 arch/riscv/kernel/tests/module_test/Makefile create mode 100644 arch/riscv/kernel/tests/module_test/test_module_linking_main.c create mode 100644 arch/riscv/kernel/tests/module_test/test_set16.S create mode 100644 arch/riscv/kernel/tests/module_test/test_set32.S create mode 100644 arch/riscv/kernel/tests/module_test/test_set6.S create mode 100644 arch/riscv/kernel/tests/module_test/test_set8.S create mode 100644 arch/riscv/kernel/tests/module_test/test_sub16.S create mode 100644 arch/riscv/kernel/tests/module_test/test_sub32.S create mode 100644 arch/riscv/kernel/tests/module_test/test_sub6.S create mode 100644 arch/riscv/kernel/tests/module_test/test_sub64.S create mode 100644 arch/riscv/kernel/tests/module_test/test_sub8.S create mode 100644 arch/riscv/kernel/tests/module_test/test_uleb128.S diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug index e69de29bb2d1..eafe17ebf710 100644 --- a/arch/riscv/Kconfig.debug +++ b/arch/riscv/Kconfig.debug @@ -0,0 +1 @@ +source "arch/riscv/kernel/tests/Kconfig.debug" diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 95cf25d48405..bb99657252f4 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -57,6 +57,7 @@ obj-y += stacktrace.o obj-y += cacheinfo.o obj-y += patch.o obj-y += probes/ +obj-y += tests/ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug new file mode 100644 index 000000000000..5dba64e8e977 --- /dev/null +++ b/arch/riscv/kernel/tests/Kconfig.debug @@ -0,0 +1,35 @@ +# SPDX-License-Identifier: GPL-2.0-only +menu "arch/riscv/kernel Testing and Coverage" + +config AS_HAS_ULEB128 + def_bool $(as-instr,.reloc label$(comma) R_RISCV_SET_ULEB128$(comma) 127\n.reloc label$(comma) R_RISCV_SUB_ULEB128$(comma) 127\nlabel:\n.word 0) + +menuconfig RUNTIME_KERNEL_TESTING_MENU + bool "arch/riscv/kernel runtime Testing" + def_bool y + help + Enable riscv kernel runtime testing. + +if RUNTIME_KERNEL_TESTING_MENU + +config RISCV_MODULE_LINKING_KUNIT + bool "KUnit test riscv module linking at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test riscv module linking at boot. This will + enable a module called "test_module_linking". 
+ + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + +endif # RUNTIME_TESTING_MENU + +endmenu # "arch/riscv/kernel runtime Testing" diff --git a/arch/riscv/kernel/tests/Makefile b/arch/riscv/kernel/tests/Makefile new file mode 100644 index 000000000000..7d6c76cffe20 --- /dev/null +++ b/arch/riscv/kernel/tests/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_RISCV_MODULE_LINKING_KUNIT) += module_test/ diff --git a/arch/riscv/kernel/tests/module_test/Makefile b/arch/riscv/kernel/tests/module_test/Makefile new file mode 100644 index 000000000000..d7a6fd8943de --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/Makefile @@ -0,0 +1,15 @@ +obj-m += test_module_linking.o + +test_sub := test_sub6.o test_sub8.o test_sub16.o test_sub32.o test_sub64.o + +test_set := test_set6.o test_set8.o test_set16.o test_set32.o + +test_module_linking-objs += $(test_sub) + +test_module_linking-objs += $(test_set) + +ifeq ($(CONFIG_AS_HAS_ULEB128),y) +test_module_linking-objs += test_uleb128.o +endif + +test_module_linking-objs += test_module_linking_main.o diff --git a/arch/riscv/kernel/tests/module_test/test_module_linking_main.c b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c new file mode 100644 index 000000000000..8df5fa5b834e --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2023 Rivos Inc. + */ + +#include +#include +#include +#include + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Test module linking"); + +extern int test_set32(void); +extern int test_set16(void); +extern int test_set8(void); +extern int test_set6(void); +extern long test_sub64(void); +extern int test_sub32(void); +extern int test_sub16(void); +extern int test_sub8(void); +extern int test_sub6(void); + +#ifdef CONFIG_AS_HAS_ULEB128 +extern int test_uleb_basic(void); +extern int test_uleb_large(void); +#endif + +#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs) + +void run_test_set(struct kunit *test); +void run_test_sub(struct kunit *test); +void run_test_uleb(struct kunit *test); + +void run_test_set(struct kunit *test) +{ + int val32 = test_set32(); + int val16 = test_set16(); + int val8 = test_set8(); + int val6 = test_set6(); + + CHECK_EQ(val32, 0); + CHECK_EQ(val16, 0); + CHECK_EQ(val8, 0); + CHECK_EQ(val6, 0); +} + +void run_test_sub(struct kunit *test) +{ + int val64 = test_sub64(); + int val32 = test_sub32(); + int val16 = test_sub16(); + int val8 = test_sub8(); + int val6 = test_sub6(); + + CHECK_EQ(val64, 0); + CHECK_EQ(val32, 0); + CHECK_EQ(val16, 0); + CHECK_EQ(val8, 0); + CHECK_EQ(val6, 0); +} + +#ifdef CONFIG_AS_HAS_ULEB128 +void run_test_uleb(struct kunit *test) +{ + int val_uleb = test_uleb_basic(); + int val_uleb2 = test_uleb_large(); + + CHECK_EQ(val_uleb, 0); + CHECK_EQ(val_uleb2, 0); +} +#endif + +static struct kunit_case __refdata riscv_module_linking_test_cases[] = { + KUNIT_CASE(run_test_set), + KUNIT_CASE(run_test_sub), +#ifdef CONFIG_AS_HAS_ULEB128 + KUNIT_CASE(run_test_uleb), +#endif + {} +}; + +static struct kunit_suite riscv_module_linking_test_suite = { + .name = "riscv_checksum", + .test_cases = riscv_module_linking_test_cases, +}; + 
+kunit_test_suites(&riscv_module_linking_test_suite); diff --git a/arch/riscv/kernel/tests/module_test/test_set16.S b/arch/riscv/kernel/tests/module_test/test_set16.S new file mode 100644 index 000000000000..2be0e441a12e --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_set16.S @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_set16 +test_set16: + lw a0, set16 + la t0, set16 +#ifdef CONFIG_32BIT + slli t0, t0, 16 + srli t0, t0, 16 +#else + slli t0, t0, 48 + srli t0, t0, 48 +#endif + sub a0, a0, t0 + ret +.data +set16: + .reloc set16, R_RISCV_SET16, set16 + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_set32.S b/arch/riscv/kernel/tests/module_test/test_set32.S new file mode 100644 index 000000000000..de0444537e67 --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_set32.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_set32 +test_set32: + lw a0, set32 + la t0, set32 +#ifndef CONFIG_32BIT + slli t0, t0, 32 + srli t0, t0, 32 +#endif + sub a0, a0, t0 + ret +.data +set32: + .reloc set32, R_RISCV_SET32, set32 + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_set6.S b/arch/riscv/kernel/tests/module_test/test_set6.S new file mode 100644 index 000000000000..c39ce4c219eb --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_set6.S @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_set6 +test_set6: + lw a0, set6 + la t0, set6 +#ifdef CONFIG_32BIT + slli t0, t0, 26 + srli t0, t0, 26 +#else + slli t0, t0, 58 + srli t0, t0, 58 +#endif + sub a0, a0, t0 + ret +.data +set6: + .reloc set6, R_RISCV_SET6, set6 + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_set8.S b/arch/riscv/kernel/tests/module_test/test_set8.S new file mode 100644 index 000000000000..a656173f6f99 --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_set8.S @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_set8 +test_set8: + lw a0, set8 + la t0, set8 +#ifdef CONFIG_32BIT + slli t0, t0, 24 + srli t0, t0, 24 +#else + slli t0, t0, 56 + srli t0, t0, 56 +#endif + sub a0, a0, t0 + ret +.data +set8: + .reloc set8, R_RISCV_SET8, set8 + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_sub16.S b/arch/riscv/kernel/tests/module_test/test_sub16.S new file mode 100644 index 000000000000..80f731d599ba --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_sub16.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_sub16 +test_sub16: + lh a0, sub16 + addi a0, a0, -32 + ret +first: + .space 32 +second: + +.data +sub16: + .reloc sub16, R_RISCV_ADD16, second + .reloc sub16, R_RISCV_SUB16, first + .half 0 diff --git a/arch/riscv/kernel/tests/module_test/test_sub32.S b/arch/riscv/kernel/tests/module_test/test_sub32.S new file mode 100644 index 000000000000..a341686e12df --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_sub32.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. 
+ */ + +.text +.global test_sub32 +test_sub32: + lw a0, sub32 + addi a0, a0, -32 + ret +first: + .space 32 +second: + +.data +sub32: + .reloc sub32, R_RISCV_ADD32, second + .reloc sub32, R_RISCV_SUB32, first + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_sub6.S b/arch/riscv/kernel/tests/module_test/test_sub6.S new file mode 100644 index 000000000000..e8b61c1ec527 --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_sub6.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_sub6 +test_sub6: + lb a0, sub6 + addi a0, a0, -32 + ret +first: + .space 32 +second: + +.data +sub6: + .reloc sub6, R_RISCV_SET6, second + .reloc sub6, R_RISCV_SUB6, first + .byte 0 diff --git a/arch/riscv/kernel/tests/module_test/test_sub64.S b/arch/riscv/kernel/tests/module_test/test_sub64.S new file mode 100644 index 000000000000..a59e8afa88fd --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_sub64.S @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_sub64 +test_sub64: +#ifdef CONFIG_32BIT + lw a0, sub64 +#else + ld a0, sub64 +#endif + addi a0, a0, -32 + ret +first: + .space 32 +second: + +.data +sub64: + .reloc sub64, R_RISCV_ADD64, second + .reloc sub64, R_RISCV_SUB64, first + .word 0 + .word 0 diff --git a/arch/riscv/kernel/tests/module_test/test_sub8.S b/arch/riscv/kernel/tests/module_test/test_sub8.S new file mode 100644 index 000000000000..ac5d0ec98de3 --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_sub8.S @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_sub8 +test_sub8: + lb a0, sub8 + addi a0, a0, -32 + ret +first: + .space 32 +second: + +.data +sub8: + .reloc sub8, R_RISCV_ADD8, second + .reloc sub8, R_RISCV_SUB8, first + .byte 0 diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S new file mode 100644 index 000000000000..90f22049d553 --- /dev/null +++ b/arch/riscv/kernel/tests/module_test/test_uleb128.S @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +.text +.global test_uleb_basic +test_uleb_basic: + ld a0, second + addi a0, a0, -127 + ret + +.global test_uleb_large +test_uleb_large: + ld a0, fourth + addi a0, a0, -0x07e8 + ret + +.data +first: + .space 127 +second: + .reloc second, R_RISCV_SET_ULEB128, second + .reloc second, R_RISCV_SUB_ULEB128, first + .dword 0 +third: + .space 1000 +fourth: + .reloc fourth, R_RISCV_SET_ULEB128, fourth + .reloc fourth, R_RISCV_SUB_ULEB128, third + .dword 0 From 28ea54bade76e2d47cb8cdb96e427cfb90d3eea7 Mon Sep 17 00:00:00 2001 From: Palmer Dabbelt Date: Tue, 7 Nov 2023 07:55:29 -0800 Subject: [PATCH 40/51] RISC-V: Don't rely on positional structure initialization Without this I get a bunch of warnings along the lines of arch/riscv/kernel/module.c:535:26: error: positional initialization of field in 'struct' declared with 'designated_init' attribute [-Werror=designated-init] 535 | [R_RISCV_32] = { apply_r_riscv_32_rela }, This just mades the member initializers explicit instead of positional. I also aligned some of the table, but mostly just to make the batch editing go faster. 
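The conversion is mechanical; each table entry goes from the positional form

        [R_RISCV_32] = { apply_r_riscv_32_rela },

to the designated one

        [R_RISCV_32] = { .reloc_handler = apply_r_riscv_32_rela },

so the initializers no longer trip -Werror=designated-init.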
Fixes: b51fc88cb35e ("Merge patch series "riscv: Add remaining module relocations and tests"") Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20231107155529.8368-1-palmer@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/module.c | 125 +++++++++++++++++++------------------ 1 file changed, 65 insertions(+), 60 deletions(-) diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index 4b339729d5ec..56a8c78e9e21 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -532,69 +532,74 @@ static int apply_uleb128_accumulation(struct module *me, void *location, long bu * This handles static linking only. */ static const struct relocation_handlers reloc_handlers[] = { - [R_RISCV_32] = { apply_r_riscv_32_rela }, - [R_RISCV_64] = { apply_r_riscv_64_rela }, - [R_RISCV_RELATIVE] = { dynamic_linking_not_supported }, - [R_RISCV_COPY] = { dynamic_linking_not_supported }, - [R_RISCV_JUMP_SLOT] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_DTPMOD32] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_DTPMOD64] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_DTPREL32] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_DTPREL64] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_TPREL32] = { dynamic_linking_not_supported }, - [R_RISCV_TLS_TPREL64] = { dynamic_linking_not_supported }, + [R_RISCV_32] = { .reloc_handler = apply_r_riscv_32_rela }, + [R_RISCV_64] = { .reloc_handler = apply_r_riscv_64_rela }, + [R_RISCV_RELATIVE] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_COPY] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_JUMP_SLOT] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPMOD32] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPMOD64] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPREL32] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_DTPREL64] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_TPREL32] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_TLS_TPREL64] = { .reloc_handler = dynamic_linking_not_supported }, /* 12-15 undefined */ - [R_RISCV_BRANCH] = { apply_r_riscv_branch_rela }, - [R_RISCV_JAL] = { apply_r_riscv_jal_rela }, - [R_RISCV_CALL] = { apply_r_riscv_call_rela }, - [R_RISCV_CALL_PLT] = { apply_r_riscv_call_plt_rela }, - [R_RISCV_GOT_HI20] = { apply_r_riscv_got_hi20_rela }, - [R_RISCV_TLS_GOT_HI20] = { tls_not_supported }, - [R_RISCV_TLS_GD_HI20] = { tls_not_supported }, - [R_RISCV_PCREL_HI20] = { apply_r_riscv_pcrel_hi20_rela }, - [R_RISCV_PCREL_LO12_I] = { apply_r_riscv_pcrel_lo12_i_rela }, - [R_RISCV_PCREL_LO12_S] = { apply_r_riscv_pcrel_lo12_s_rela }, - [R_RISCV_HI20] = { apply_r_riscv_hi20_rela }, - [R_RISCV_LO12_I] = { apply_r_riscv_lo12_i_rela }, - [R_RISCV_LO12_S] = { apply_r_riscv_lo12_s_rela }, - [R_RISCV_TPREL_HI20] = { tls_not_supported }, - [R_RISCV_TPREL_LO12_I] = { tls_not_supported }, - [R_RISCV_TPREL_LO12_S] = { tls_not_supported }, - [R_RISCV_TPREL_ADD] = { tls_not_supported }, - [R_RISCV_ADD8] = { apply_r_riscv_add8_rela, apply_8_bit_accumulation }, - [R_RISCV_ADD16] = { apply_r_riscv_add16_rela, - apply_16_bit_accumulation }, - [R_RISCV_ADD32] = { apply_r_riscv_add32_rela, - apply_32_bit_accumulation }, - [R_RISCV_ADD64] = { apply_r_riscv_add64_rela, - apply_64_bit_accumulation }, - [R_RISCV_SUB8] = { apply_r_riscv_sub8_rela, apply_8_bit_accumulation }, - [R_RISCV_SUB16] = { apply_r_riscv_sub16_rela, - apply_16_bit_accumulation }, 
- [R_RISCV_SUB32] = { apply_r_riscv_sub32_rela, - apply_32_bit_accumulation }, - [R_RISCV_SUB64] = { apply_r_riscv_sub64_rela, - apply_64_bit_accumulation }, + [R_RISCV_BRANCH] = { .reloc_handler = apply_r_riscv_branch_rela }, + [R_RISCV_JAL] = { .reloc_handler = apply_r_riscv_jal_rela }, + [R_RISCV_CALL] = { .reloc_handler = apply_r_riscv_call_rela }, + [R_RISCV_CALL_PLT] = { .reloc_handler = apply_r_riscv_call_plt_rela }, + [R_RISCV_GOT_HI20] = { .reloc_handler = apply_r_riscv_got_hi20_rela }, + [R_RISCV_TLS_GOT_HI20] = { .reloc_handler = tls_not_supported }, + [R_RISCV_TLS_GD_HI20] = { .reloc_handler = tls_not_supported }, + [R_RISCV_PCREL_HI20] = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela }, + [R_RISCV_PCREL_LO12_I] = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela }, + [R_RISCV_PCREL_LO12_S] = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela }, + [R_RISCV_HI20] = { .reloc_handler = apply_r_riscv_hi20_rela }, + [R_RISCV_LO12_I] = { .reloc_handler = apply_r_riscv_lo12_i_rela }, + [R_RISCV_LO12_S] = { .reloc_handler = apply_r_riscv_lo12_s_rela }, + [R_RISCV_TPREL_HI20] = { .reloc_handler = tls_not_supported }, + [R_RISCV_TPREL_LO12_I] = { .reloc_handler = tls_not_supported }, + [R_RISCV_TPREL_LO12_S] = { .reloc_handler = tls_not_supported }, + [R_RISCV_TPREL_ADD] = { .reloc_handler = tls_not_supported }, + [R_RISCV_ADD8] = { .reloc_handler = apply_r_riscv_add8_rela, + .accumulate_handler = apply_8_bit_accumulation }, + [R_RISCV_ADD16] = { .reloc_handler = apply_r_riscv_add16_rela, + .accumulate_handler = apply_16_bit_accumulation }, + [R_RISCV_ADD32] = { .reloc_handler = apply_r_riscv_add32_rela, + .accumulate_handler = apply_32_bit_accumulation }, + [R_RISCV_ADD64] = { .reloc_handler = apply_r_riscv_add64_rela, + .accumulate_handler = apply_64_bit_accumulation }, + [R_RISCV_SUB8] = { .reloc_handler = apply_r_riscv_sub8_rela, + .accumulate_handler = apply_8_bit_accumulation }, + [R_RISCV_SUB16] = { .reloc_handler = apply_r_riscv_sub16_rela, + .accumulate_handler = apply_16_bit_accumulation }, + [R_RISCV_SUB32] = { .reloc_handler = apply_r_riscv_sub32_rela, + .accumulate_handler = apply_32_bit_accumulation }, + [R_RISCV_SUB64] = { .reloc_handler = apply_r_riscv_sub64_rela, + .accumulate_handler = apply_64_bit_accumulation }, /* 41-42 reserved for future standard use */ - [R_RISCV_ALIGN] = { apply_r_riscv_align_rela }, - [R_RISCV_RVC_BRANCH] = { apply_r_riscv_rvc_branch_rela }, - [R_RISCV_RVC_JUMP] = { apply_r_riscv_rvc_jump_rela }, + [R_RISCV_ALIGN] = { .reloc_handler = apply_r_riscv_align_rela }, + [R_RISCV_RVC_BRANCH] = { .reloc_handler = apply_r_riscv_rvc_branch_rela }, + [R_RISCV_RVC_JUMP] = { .reloc_handler = apply_r_riscv_rvc_jump_rela }, /* 46-50 reserved for future standard use */ - [R_RISCV_RELAX] = { apply_r_riscv_relax_rela }, - [R_RISCV_SUB6] = { apply_r_riscv_sub6_rela, apply_6_bit_accumulation }, - [R_RISCV_SET6] = { apply_r_riscv_set6_rela, apply_6_bit_accumulation }, - [R_RISCV_SET8] = { apply_r_riscv_set8_rela, apply_8_bit_accumulation }, - [R_RISCV_SET16] = { apply_r_riscv_set16_rela, - apply_16_bit_accumulation }, - [R_RISCV_SET32] = { apply_r_riscv_set32_rela, - apply_32_bit_accumulation }, - [R_RISCV_32_PCREL] = { apply_r_riscv_32_pcrel_rela }, - [R_RISCV_IRELATIVE] = { dynamic_linking_not_supported }, - [R_RISCV_PLT32] = { apply_r_riscv_plt32_rela }, - [R_RISCV_SET_ULEB128] = { apply_r_riscv_set_uleb128, - apply_uleb128_accumulation }, - [R_RISCV_SUB_ULEB128] = { apply_r_riscv_sub_uleb128, - apply_uleb128_accumulation }, + [R_RISCV_RELAX] = { 
.reloc_handler = apply_r_riscv_relax_rela }, + [R_RISCV_SUB6] = { .reloc_handler = apply_r_riscv_sub6_rela, + .accumulate_handler = apply_6_bit_accumulation }, + [R_RISCV_SET6] = { .reloc_handler = apply_r_riscv_set6_rela, + .accumulate_handler = apply_6_bit_accumulation }, + [R_RISCV_SET8] = { .reloc_handler = apply_r_riscv_set8_rela, + .accumulate_handler = apply_8_bit_accumulation }, + [R_RISCV_SET16] = { .reloc_handler = apply_r_riscv_set16_rela, + .accumulate_handler = apply_16_bit_accumulation }, + [R_RISCV_SET32] = { .reloc_handler = apply_r_riscv_set32_rela, + .accumulate_handler = apply_32_bit_accumulation }, + [R_RISCV_32_PCREL] = { .reloc_handler = apply_r_riscv_32_pcrel_rela }, + [R_RISCV_IRELATIVE] = { .reloc_handler = dynamic_linking_not_supported }, + [R_RISCV_PLT32] = { .reloc_handler = apply_r_riscv_plt32_rela }, + [R_RISCV_SET_ULEB128] = { .reloc_handler = apply_r_riscv_set_uleb128, + .accumulate_handler = apply_uleb128_accumulation }, + [R_RISCV_SUB_ULEB128] = { .reloc_handler = apply_r_riscv_sub_uleb128, + .accumulate_handler = apply_uleb128_accumulation }, /* 62-191 reserved for future standard use */ /* 192-255 nonstandard ABI extensions */ }; From d3d2cf1acab1857ae1982d431be9d96dc0e0775c Mon Sep 17 00:00:00 2001 From: Evan Green Date: Mon, 6 Nov 2023 15:24:39 -0800 Subject: [PATCH 41/51] RISC-V: Show accurate per-hart isa in /proc/cpuinfo In /proc/cpuinfo, most of the information we show for each processor is specific to that hart: marchid, mvendorid, mimpid, processor, hart, compatible, and the mmu size. But the ISA string gets filtered through a lowest common denominator mask, so that if one CPU is missing an ISA extension, no CPUs will show it. Now that we track the ISA extensions for each hart, let's report ISA extension info accurately per-hart in /proc/cpuinfo. We cannot change the "isa:" line, as usermode may be relying on that line to show only the common set of extensions supported across all harts. Add a new "hart isa" line instead, which reports the true set of extensions for that hart. Signed-off-by: Evan Green Reviewed-by: Andrew Jones Reviewed-by: Conor Dooley Link: https://lore.kernel.org/r/20231106232439.3176268-1-evan@rivosinc.com Signed-off-by: Palmer Dabbelt --- Documentation/riscv/uabi.rst | 20 ++++++++++++++++++++ arch/riscv/kernel/cpu.c | 22 ++++++++++++++++++---- 2 files changed, 38 insertions(+), 4 deletions(-) diff --git a/Documentation/riscv/uabi.rst b/Documentation/riscv/uabi.rst index 8960fac42c40..54d199dce78b 100644 --- a/Documentation/riscv/uabi.rst +++ b/Documentation/riscv/uabi.rst @@ -42,6 +42,26 @@ An example string following the order is:: rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux +"isa" and "hart isa" lines in /proc/cpuinfo +------------------------------------------- + +The "isa" line in /proc/cpuinfo describes the lowest common denominator of +RISC-V ISA extensions recognized by the kernel and implemented on all harts. The +"hart isa" line, in contrast, describes the set of extensions recognized by the +kernel on the particular hart being described, even if those extensions may not +be present on all harts in the system. + +In both lines, the presence of an extension guarantees only that the hardware +has the described capability. Additional kernel support or policy changes may be +required before an extension's capability is fully usable by userspace programs. 
+Similarly, for S-mode extensions, presence in one of these lines does not +guarantee that the kernel is taking advantage of the extension, or that the +feature will be visible in guest VMs managed by this kernel. + +Inversely, the absence of an extension in these lines does not necessarily mean +the hardware does not support that feature. The running kernel may not recognize +the extension, or may have deliberately removed it from the listing. + Misaligned accesses ------------------- diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index c17dacb1141c..bcfc0c8dfd01 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -202,9 +202,8 @@ arch_initcall(riscv_cpuinfo_init); #ifdef CONFIG_PROC_FS -static void print_isa(struct seq_file *f) +static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap) { - seq_puts(f, "isa\t\t: "); if (IS_ENABLED(CONFIG_32BIT)) seq_write(f, "rv32", 4); @@ -212,7 +211,7 @@ static void print_isa(struct seq_file *f) seq_write(f, "rv64", 4); for (int i = 0; i < riscv_isa_ext_count; i++) { - if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id)) + if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id)) continue; /* Only multi-letter extensions are split by underscores */ @@ -276,7 +275,15 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "processor\t: %lu\n", cpu_id); seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id)); - print_isa(m); + + /* + * For historical raisins, the isa: line is limited to the lowest common + * denominator of extensions supported across all harts. A true list of + * extensions supported on this hart is printed later in the hart isa: + * line. + */ + seq_puts(m, "isa\t\t: "); + print_isa(m, NULL); print_mmu(m); if (acpi_disabled) { @@ -292,6 +299,13 @@ static int c_show(struct seq_file *m, void *v) seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid); seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid); seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid); + + /* + * Print the ISA extensions specific to this hart, which may show + * additional extensions not present across all harts. + */ + seq_puts(m, "hart isa\t: "); + print_isa(m, hart_isa[cpu_id].isa); seq_puts(m, "\n"); return 0; From 6eb7a6445b76a2fced91ebcf93d7a9073258c8cf Mon Sep 17 00:00:00 2001 From: Evan Green Date: Mon, 6 Nov 2023 15:11:05 -0800 Subject: [PATCH 42/51] RISC-V: Remove __init on unaligned_emulation_finish() This function shouldn't be __init, since it's called during hotplug. 
The warning says it well enough: WARNING: modpost: vmlinux: section mismatch in reference: check_unaligned_access_all_cpus+0x13a (section: .text) -> unaligned_emulation_finish (section: .init.text) Signed-off-by: Evan Green Fixes: 71c54b3d169d ("riscv: report misaligned accesses emulation to hwprobe") Link: https://lore.kernel.org/r/20231106231105.3141413-1-evan@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/traps_misaligned.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index bba301b5194d..5eba37147caa 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -625,7 +625,7 @@ bool check_unaligned_access_emulated(int cpu) return misaligned_emu_detected; } -void __init unaligned_emulation_finish(void) +void unaligned_emulation_finish(void) { int cpu; From 55e0bf49a0d0387d682d696e41cada071f516075 Mon Sep 17 00:00:00 2001 From: Evan Green Date: Mon, 6 Nov 2023 14:58:55 -0800 Subject: [PATCH 43/51] RISC-V: Probe misaligned access speed in parallel Probing for misaligned access speed takes about 0.06 seconds. On a system with 64 cores, doing this in smp_callin() means it's done serially, extending boot time by 3.8 seconds. That's a lot of boot time. Instead of measuring each CPU serially, let's do the measurements on all CPUs in parallel. If we disable preemption on all CPUs, the jiffies stop ticking, so we can do this in stages of 1) everybody except core 0, then 2) core 0. The allocations are all done outside of on_each_cpu() to avoid calling alloc_pages() with interrupts disabled. For hotplugged CPUs that come in after the boot time measurement, register CPU hotplug callbacks, and do the measurement there. Interrupts are enabled in those callbacks, so they're fine to do alloc_pages() in. Reported-by: Jisheng Zhang Closes: https://lore.kernel.org/all/mhng-9359993d-6872-4134-83ce-c97debe1cf9a@palmer-ri-x1c9/T/#mae9b8f40016f9df428829d33360144dc5026bcbf Fixes: 584ea6564bca ("RISC-V: Probe for unaligned access speed") Signed-off-by: Evan Green Link: https://lore.kernel.org/r/20231106225855.3121724-1-evan@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 1 - arch/riscv/kernel/cpufeature.c | 106 +++++++++++++++++++++------- arch/riscv/kernel/smpboot.c | 1 - 3 files changed, 82 insertions(+), 26 deletions(-) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 7f1e46a9d445..69f2cae96f0b 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -30,7 +30,6 @@ DECLARE_PER_CPU(long, misaligned_access_speed); /* Per-cpu ISA extensions. 
*/ extern struct riscv_isainfo hart_isa[NR_CPUS]; -void check_unaligned_access(int cpu); void riscv_user_isa_enable(void); #ifdef CONFIG_RISCV_MISALIGNED diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 6a01ded615cd..fe59e18dbd5b 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -29,6 +30,7 @@ #define MISALIGNED_ACCESS_JIFFIES_LG2 1 #define MISALIGNED_BUFFER_SIZE 0x4000 +#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE) #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80) unsigned long elf_hwcap __read_mostly; @@ -557,30 +559,21 @@ unsigned long riscv_get_elf_hwcap(void) return hwcap; } -void check_unaligned_access(int cpu) +static int check_unaligned_access(void *param) { + int cpu = smp_processor_id(); u64 start_cycles, end_cycles; u64 word_cycles; u64 byte_cycles; int ratio; unsigned long start_jiffies, now; - struct page *page; + struct page *page = param; void *dst; void *src; long speed = RISCV_HWPROBE_MISALIGNED_SLOW; if (check_unaligned_access_emulated(cpu)) - return; - - /* We are already set since the last check */ - if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) - return; - - page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE)); - if (!page) { - pr_warn("Can't alloc pages to measure memcpy performance"); - return; - } + return 0; /* Make an unaligned destination buffer. */ dst = (void *)((unsigned long)page_address(page) | 0x1); @@ -634,7 +627,7 @@ void check_unaligned_access(int cpu) pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n", cpu); - goto out; + return 0; } if (word_cycles < byte_cycles) @@ -648,19 +641,84 @@ void check_unaligned_access(int cpu) (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow"); per_cpu(misaligned_access_speed, cpu) = speed; - -out: - __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE)); -} - -static int __init check_unaligned_access_boot_cpu(void) -{ - check_unaligned_access(0); - unaligned_emulation_finish(); return 0; } -arch_initcall(check_unaligned_access_boot_cpu); +static void check_unaligned_access_nonboot_cpu(void *param) +{ + unsigned int cpu = smp_processor_id(); + struct page **pages = param; + + if (smp_processor_id() != 0) + check_unaligned_access(pages[cpu]); +} + +static int riscv_online_cpu(unsigned int cpu) +{ + static struct page *buf; + + /* We are already set since the last check */ + if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) + return 0; + + buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); + if (!buf) { + pr_warn("Allocation failure, not measuring misaligned performance\n"); + return -ENOMEM; + } + + check_unaligned_access(buf); + __free_pages(buf, MISALIGNED_BUFFER_ORDER); + return 0; +} + +/* Measure unaligned access on all CPUs present at boot in parallel. */ +static int check_unaligned_access_all_cpus(void) +{ + unsigned int cpu; + unsigned int cpu_count = num_possible_cpus(); + struct page **bufs = kzalloc(cpu_count * sizeof(struct page *), + GFP_KERNEL); + + if (!bufs) { + pr_warn("Allocation failure, not measuring misaligned performance\n"); + return 0; + } + + /* + * Allocate separate buffers for each CPU so there's no fighting over + * cache lines. 
+ */ + for_each_cpu(cpu, cpu_online_mask) { + bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); + if (!bufs[cpu]) { + pr_warn("Allocation failure, not measuring misaligned performance\n"); + goto out; + } + } + + /* Check everybody except 0, who stays behind to tend jiffies. */ + on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1); + + /* Check core 0. */ + smp_call_on_cpu(0, check_unaligned_access, bufs[0], true); + + /* Setup hotplug callback for any new CPUs that come online. */ + cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", + riscv_online_cpu, NULL); + +out: + unaligned_emulation_finish(); + for_each_cpu(cpu, cpu_online_mask) { + if (bufs[cpu]) + __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER); + } + + kfree(bufs); + return 0; +} + +arch_initcall(check_unaligned_access_all_cpus); void riscv_user_isa_enable(void) { diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index d69c628c24f4..d162bf339beb 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -247,7 +247,6 @@ asmlinkage __visible void smp_callin(void) riscv_ipi_enable(); numa_add_cpu(curr_cpuid); - check_unaligned_access(curr_cpuid); set_cpu_online(curr_cpuid, 1); if (has_vector()) { From 629db01c64ff6cea08fc61b52426362689ef8618 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Wed, 8 Nov 2023 08:59:29 +0100 Subject: [PATCH 44/51] riscv: Don't use PGD entries for the linear mapping Propagating changes at this level is cumbersome as we need to go through all the page tables when that happens (either when changing the permissions or when splitting the mapping). Note that this prevents the use of 4MB mapping for sv32 and 1GB mapping for sv39 in the linear mapping. Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20231108075930.7157-2-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/init.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 0798bd861dcb..6dc61d3c392f 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -664,16 +664,16 @@ void __init create_pgd_mapping(pgd_t *pgdp, static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va, phys_addr_t size) { - if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE) - return PGDIR_SIZE; - - if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) + if (pgtable_l5_enabled && + !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE) return P4D_SIZE; - if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) + if (pgtable_l4_enabled && + !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE) return PUD_SIZE; - if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) + if (IS_ENABLED(CONFIG_64BIT) && + !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE) return PMD_SIZE; return PAGE_SIZE; From 311cd2f6e25380cff0abc2884dc6a3d33bc9b5c3 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Wed, 8 Nov 2023 08:59:30 +0100 Subject: [PATCH 45/51] riscv: Fix set_memory_XX() and set_direct_map_XX() by splitting huge linear mappings When STRICT_KERNEL_RWX is set, any change of permissions on any kernel mapping (vmalloc/modules/kernel text...etc) should be applied on its linear mapping alias. The problem is that the riscv kernel uses huge mappings for the linear mapping and walk_page_range_novma() does not split those huge mappings. 
So this patchset implements such split in order to apply fine-grained permissions on the linear mapping. Below is the difference before and after (the first PUD mapping is split into PTE/PMD mappings): Before: ---[ Linear mapping ]--- 0xffffaf8000080000-0xffffaf8000200000 0x0000000080080000 1536K PTE D A G . . W R V 0xffffaf8000200000-0xffffaf8077c00000 0x0000000080200000 1914M PMD D A G . . W R V 0xffffaf8077c00000-0xffffaf8078800000 0x00000000f7c00000 12M PMD D A G . . . R V 0xffffaf8078800000-0xffffaf8078c00000 0x00000000f8800000 4M PMD D A G . . W R V 0xffffaf8078c00000-0xffffaf8079200000 0x00000000f8c00000 6M PMD D A G . . . R V 0xffffaf8079200000-0xffffaf807e600000 0x00000000f9200000 84M PMD D A G . . W R V 0xffffaf807e600000-0xffffaf807e716000 0x00000000fe600000 1112K PTE D A G . . W R V 0xffffaf807e717000-0xffffaf807e71a000 0x00000000fe717000 12K PTE D A G . . W R V 0xffffaf807e71d000-0xffffaf807e71e000 0x00000000fe71d000 4K PTE D A G . . W R V 0xffffaf807e722000-0xffffaf807e800000 0x00000000fe722000 888K PTE D A G . . W R V 0xffffaf807e800000-0xffffaf807fe00000 0x00000000fe800000 22M PMD D A G . . W R V 0xffffaf807fe00000-0xffffaf807ff54000 0x00000000ffe00000 1360K PTE D A G . . W R V 0xffffaf807ff55000-0xffffaf8080000000 0x00000000fff55000 684K PTE D A G . . W R V 0xffffaf8080000000-0xffffaf8400000000 0x0000000100000000 14G PUD D A G . . W R V After: ---[ Linear mapping ]--- 0xffffaf8000080000-0xffffaf8000200000 0x0000000080080000 1536K PTE D A G . . W R V 0xffffaf8000200000-0xffffaf8077c00000 0x0000000080200000 1914M PMD D A G . . W R V 0xffffaf8077c00000-0xffffaf8078800000 0x00000000f7c00000 12M PMD D A G . . . R V 0xffffaf8078800000-0xffffaf8078a00000 0x00000000f8800000 2M PMD D A G . . W R V 0xffffaf8078a00000-0xffffaf8078c00000 0x00000000f8a00000 2M PTE D A G . . W R V 0xffffaf8078c00000-0xffffaf8079200000 0x00000000f8c00000 6M PMD D A G . . . R V 0xffffaf8079200000-0xffffaf807e600000 0x00000000f9200000 84M PMD D A G . . W R V 0xffffaf807e600000-0xffffaf807e716000 0x00000000fe600000 1112K PTE D A G . . W R V 0xffffaf807e717000-0xffffaf807e71a000 0x00000000fe717000 12K PTE D A G . . W R V 0xffffaf807e71d000-0xffffaf807e71e000 0x00000000fe71d000 4K PTE D A G . . W R V 0xffffaf807e722000-0xffffaf807e800000 0x00000000fe722000 888K PTE D A G . . W R V 0xffffaf807e800000-0xffffaf807fe00000 0x00000000fe800000 22M PMD D A G . . W R V 0xffffaf807fe00000-0xffffaf807ff54000 0x00000000ffe00000 1360K PTE D A G . . W R V 0xffffaf807ff55000-0xffffaf8080000000 0x00000000fff55000 684K PTE D A G . . W R V 0xffffaf8080000000-0xffffaf8080800000 0x0000000100000000 8M PMD D A G . . W R V 0xffffaf8080800000-0xffffaf8080af6000 0x0000000100800000 3032K PTE D A G . . W R V 0xffffaf8080af6000-0xffffaf8080af8000 0x0000000100af6000 8K PTE D A G . X . R V 0xffffaf8080af8000-0xffffaf8080c00000 0x0000000100af8000 1056K PTE D A G . . W R V 0xffffaf8080c00000-0xffffaf8081a00000 0x0000000100c00000 14M PMD D A G . . W R V 0xffffaf8081a00000-0xffffaf8081a40000 0x0000000101a00000 256K PTE D A G . . W R V 0xffffaf8081a40000-0xffffaf8081a44000 0x0000000101a40000 16K PTE D A G . X . R V 0xffffaf8081a44000-0xffffaf8081a52000 0x0000000101a44000 56K PTE D A G . . W R V 0xffffaf8081a52000-0xffffaf8081a54000 0x0000000101a52000 8K PTE D A G . X . R V ... 0xffffaf809e800000-0xffffaf80c0000000 0x000000011e800000 536M PMD D A G . . W R V 0xffffaf80c0000000-0xffffaf8400000000 0x0000000140000000 13G PUD D A G . . 
W R V Note that this also fixes memfd_secret() syscall which uses set_direct_map_invalid_noflush() and set_direct_map_default_noflush() to remove the pages from the linear mapping. Below is the kernel page table while a memfd_secret() syscall is running, you can see all the !valid page table entries in the linear mapping: ... 0xffffaf8082240000-0xffffaf8082241000 0x0000000102240000 4K PTE D A G . . W R . 0xffffaf8082241000-0xffffaf8082250000 0x0000000102241000 60K PTE D A G . . W R V 0xffffaf8082250000-0xffffaf8082252000 0x0000000102250000 8K PTE D A G . . W R . 0xffffaf8082252000-0xffffaf8082256000 0x0000000102252000 16K PTE D A G . . W R V 0xffffaf8082256000-0xffffaf8082257000 0x0000000102256000 4K PTE D A G . . W R . 0xffffaf8082257000-0xffffaf8082258000 0x0000000102257000 4K PTE D A G . . W R V 0xffffaf8082258000-0xffffaf8082259000 0x0000000102258000 4K PTE D A G . . W R . 0xffffaf8082259000-0xffffaf808225a000 0x0000000102259000 4K PTE D A G . . W R V 0xffffaf808225a000-0xffffaf808225c000 0x000000010225a000 8K PTE D A G . . W R . 0xffffaf808225c000-0xffffaf8082266000 0x000000010225c000 40K PTE D A G . . W R V 0xffffaf8082266000-0xffffaf8082268000 0x0000000102266000 8K PTE D A G . . W R . 0xffffaf8082268000-0xffffaf8082284000 0x0000000102268000 112K PTE D A G . . W R V 0xffffaf8082284000-0xffffaf8082288000 0x0000000102284000 16K PTE D A G . . W R . 0xffffaf8082288000-0xffffaf808229c000 0x0000000102288000 80K PTE D A G . . W R V 0xffffaf808229c000-0xffffaf80822a0000 0x000000010229c000 16K PTE D A G . . W R . 0xffffaf80822a0000-0xffffaf80822a5000 0x00000001022a0000 20K PTE D A G . . W R V 0xffffaf80822a5000-0xffffaf80822a6000 0x00000001022a5000 4K PTE D A G . . . R V 0xffffaf80822a6000-0xffffaf80822ab000 0x00000001022a6000 20K PTE D A G . . W R V ... And when the memfd_secret() fd is released, the linear mapping is correctly reset: ... 0xffffaf8082240000-0xffffaf80822a5000 0x0000000102240000 404K PTE D A G . . W R V 0xffffaf80822a5000-0xffffaf80822a6000 0x00000001022a5000 4K PTE D A G . . . R V 0xffffaf80822a6000-0xffffaf80822af000 0x00000001022a6000 36K PTE D A G . . W R V ... 
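
A quick way to exercise this path from userspace is the memfd_secret() syscall mentioned above. The program below is a hedged sketch and not part of the patch: it assumes a kernel with secretmem enabled and libc headers that define SYS_memfd_secret, and the 4 KiB size and error handling are arbitrary choices for illustration.

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <string.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          /* Older libcs have no wrapper, so invoke the syscall directly. */
          int fd = syscall(SYS_memfd_secret, 0);
          if (fd < 0) {
                  perror("memfd_secret");
                  return 1;
          }
          if (ftruncate(fd, 4096) < 0) {
                  perror("ftruncate");
                  return 1;
          }
          char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
          if (p == MAP_FAILED) {
                  perror("mmap");
                  return 1;
          }
          /* Faulting the page in is what removes it from the kernel's
           * linear mapping via set_direct_map_invalid_noflush(). */
          strcpy(p, "secret");
          printf("%s\n", p);
          munmap(p, 4096);
          close(fd);
          return 0;
  }

Closing the fd releases the pages and restores the direct map, which is the transition visible in the page table dumps above.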
Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20231108075930.7157-3-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- arch/riscv/mm/pageattr.c | 270 +++++++++++++++++++++++++++++++++------ 1 file changed, 230 insertions(+), 40 deletions(-) diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c index 161d0b34c2cb..fc5fc4f785c4 100644 --- a/arch/riscv/mm/pageattr.c +++ b/arch/riscv/mm/pageattr.c @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -25,19 +26,6 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk) return new_val; } -static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr, - unsigned long next, struct mm_walk *walk) -{ - pgd_t val = READ_ONCE(*pgd); - - if (pgd_leaf(val)) { - val = __pgd(set_pageattr_masks(pgd_val(val), walk)); - set_pgd(pgd, val); - } - - return 0; -} - static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr, unsigned long next, struct mm_walk *walk) { @@ -96,7 +84,6 @@ static int pageattr_pte_hole(unsigned long addr, unsigned long next, } static const struct mm_walk_ops pageattr_ops = { - .pgd_entry = pageattr_pgd_entry, .p4d_entry = pageattr_p4d_entry, .pud_entry = pageattr_pud_entry, .pmd_entry = pageattr_pmd_entry, @@ -105,12 +92,181 @@ static const struct mm_walk_ops pageattr_ops = { .walk_lock = PGWALK_RDLOCK, }; +#ifdef CONFIG_64BIT +static int __split_linear_mapping_pmd(pud_t *pudp, + unsigned long vaddr, unsigned long end) +{ + pmd_t *pmdp; + unsigned long next; + + pmdp = pmd_offset(pudp, vaddr); + + do { + next = pmd_addr_end(vaddr, end); + + if (next - vaddr >= PMD_SIZE && + vaddr <= (vaddr & PMD_MASK) && end >= next) + continue; + + if (pmd_leaf(*pmdp)) { + struct page *pte_page; + unsigned long pfn = _pmd_pfn(*pmdp); + pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK); + pte_t *ptep_new; + int i; + + pte_page = alloc_page(GFP_KERNEL); + if (!pte_page) + return -ENOMEM; + + ptep_new = (pte_t *)page_address(pte_page); + for (i = 0; i < PTRS_PER_PTE; ++i, ++ptep_new) + set_pte(ptep_new, pfn_pte(pfn + i, prot)); + + smp_wmb(); + + set_pmd(pmdp, pfn_pmd(page_to_pfn(pte_page), PAGE_TABLE)); + } + } while (pmdp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_pud(p4d_t *p4dp, + unsigned long vaddr, unsigned long end) +{ + pud_t *pudp; + unsigned long next; + int ret; + + pudp = pud_offset(p4dp, vaddr); + + do { + next = pud_addr_end(vaddr, end); + + if (next - vaddr >= PUD_SIZE && + vaddr <= (vaddr & PUD_MASK) && end >= next) + continue; + + if (pud_leaf(*pudp)) { + struct page *pmd_page; + unsigned long pfn = _pud_pfn(*pudp); + pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK); + pmd_t *pmdp_new; + int i; + + pmd_page = alloc_page(GFP_KERNEL); + if (!pmd_page) + return -ENOMEM; + + pmdp_new = (pmd_t *)page_address(pmd_page); + for (i = 0; i < PTRS_PER_PMD; ++i, ++pmdp_new) + set_pmd(pmdp_new, + pfn_pmd(pfn + ((i * PMD_SIZE) >> PAGE_SHIFT), prot)); + + smp_wmb(); + + set_pud(pudp, pfn_pud(page_to_pfn(pmd_page), PAGE_TABLE)); + } + + ret = __split_linear_mapping_pmd(pudp, vaddr, next); + if (ret) + return ret; + } while (pudp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_p4d(pgd_t *pgdp, + unsigned long vaddr, unsigned long end) +{ + p4d_t *p4dp; + unsigned long next; + int ret; + + p4dp = p4d_offset(pgdp, vaddr); + + do { + next = p4d_addr_end(vaddr, end); + + /* + * If [vaddr; end] contains [vaddr & P4D_MASK; next], we don't + * need to split, we'll change the 
protections on the whole P4D. + */ + if (next - vaddr >= P4D_SIZE && + vaddr <= (vaddr & P4D_MASK) && end >= next) + continue; + + if (p4d_leaf(*p4dp)) { + struct page *pud_page; + unsigned long pfn = _p4d_pfn(*p4dp); + pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK); + pud_t *pudp_new; + int i; + + pud_page = alloc_page(GFP_KERNEL); + if (!pud_page) + return -ENOMEM; + + /* + * Fill the pud level with leaf puds that have the same + * protections as the leaf p4d. + */ + pudp_new = (pud_t *)page_address(pud_page); + for (i = 0; i < PTRS_PER_PUD; ++i, ++pudp_new) + set_pud(pudp_new, + pfn_pud(pfn + ((i * PUD_SIZE) >> PAGE_SHIFT), prot)); + + /* + * Make sure the pud filling is not reordered with the + * p4d store which could result in seeing a partially + * filled pud level. + */ + smp_wmb(); + + set_p4d(p4dp, pfn_p4d(page_to_pfn(pud_page), PAGE_TABLE)); + } + + ret = __split_linear_mapping_pud(p4dp, vaddr, next); + if (ret) + return ret; + } while (p4dp++, vaddr = next, vaddr != end); + + return 0; +} + +static int __split_linear_mapping_pgd(pgd_t *pgdp, + unsigned long vaddr, + unsigned long end) +{ + unsigned long next; + int ret; + + do { + next = pgd_addr_end(vaddr, end); + /* We never use PGD mappings for the linear mapping */ + ret = __split_linear_mapping_p4d(pgdp, vaddr, next); + if (ret) + return ret; + } while (pgdp++, vaddr = next, vaddr != end); + + return 0; +} + +static int split_linear_mapping(unsigned long start, unsigned long end) +{ + return __split_linear_mapping_pgd(pgd_offset_k(start), start, end); +} +#endif /* CONFIG_64BIT */ + static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, pgprot_t clear_mask) { int ret; unsigned long start = addr; unsigned long end = start + PAGE_SIZE * numpages; + unsigned long __maybe_unused lm_start; + unsigned long __maybe_unused lm_end; struct pageattr_masks masks = { .set_mask = set_mask, .clear_mask = clear_mask @@ -120,11 +276,67 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask, return 0; mmap_write_lock(&init_mm); + +#ifdef CONFIG_64BIT + /* + * We are about to change the permissions of a kernel mapping, we must + * apply the same changes to its linear mapping alias, which may imply + * splitting a huge mapping. + */ + + if (is_vmalloc_or_module_addr((void *)start)) { + struct vm_struct *area = NULL; + int i, page_start; + + area = find_vm_area((void *)start); + page_start = (start - (unsigned long)area->addr) >> PAGE_SHIFT; + + for (i = page_start; i < page_start + numpages; ++i) { + lm_start = (unsigned long)page_address(area->pages[i]); + lm_end = lm_start + PAGE_SIZE; + + ret = split_linear_mapping(lm_start, lm_end); + if (ret) + goto unlock; + + ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + &pageattr_ops, NULL, &masks); + if (ret) + goto unlock; + } + } else if (is_kernel_mapping(start) || is_linear_mapping(start)) { + lm_start = (unsigned long)lm_alias(start); + lm_end = (unsigned long)lm_alias(end); + + ret = split_linear_mapping(lm_start, lm_end); + if (ret) + goto unlock; + + ret = walk_page_range_novma(&init_mm, lm_start, lm_end, + &pageattr_ops, NULL, &masks); + if (ret) + goto unlock; + } + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, &masks); + +unlock: + mmap_write_unlock(&init_mm); + + /* + * We can't use flush_tlb_kernel_range() here as we may have split a + * hugepage that is larger than that, so let's flush everything. 
+ */ + flush_tlb_all(); +#else + ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL, + &masks); + mmap_write_unlock(&init_mm); flush_tlb_kernel_range(start, end); +#endif return ret; } @@ -159,36 +371,14 @@ int set_memory_nx(unsigned long addr, int numpages) int set_direct_map_invalid_noflush(struct page *page) { - int ret; - unsigned long start = (unsigned long)page_address(page); - unsigned long end = start + PAGE_SIZE; - struct pageattr_masks masks = { - .set_mask = __pgprot(0), - .clear_mask = __pgprot(_PAGE_PRESENT) - }; - - mmap_read_lock(&init_mm); - ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); - mmap_read_unlock(&init_mm); - - return ret; + return __set_memory((unsigned long)page_address(page), 1, + __pgprot(0), __pgprot(_PAGE_PRESENT)); } int set_direct_map_default_noflush(struct page *page) { - int ret; - unsigned long start = (unsigned long)page_address(page); - unsigned long end = start + PAGE_SIZE; - struct pageattr_masks masks = { - .set_mask = PAGE_KERNEL, - .clear_mask = __pgprot(0) - }; - - mmap_read_lock(&init_mm); - ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks); - mmap_read_unlock(&init_mm); - - return ret; + return __set_memory((unsigned long)page_address(page), 1, + PAGE_KERNEL, __pgprot(0)); } #ifdef CONFIG_DEBUG_PAGEALLOC From c4676f8dc1e12e68d6511f9ed89707fdad4c962c Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 27 Oct 2023 21:12:53 +0530 Subject: [PATCH 46/51] RISC-V: Don't fail in riscv_of_parent_hartid() for disabled HARTs The riscv_of_processor_hartid() used by riscv_of_parent_hartid() fails for HARTs disabled in the DT. This results in the following warning thrown by the RISC-V INTC driver for the E-core on SiFive boards: [ 0.000000] riscv-intc: unable to find hart id for /cpus/cpu@0/interrupt-controller The riscv_of_parent_hartid() is only expected to read the hartid from the DT so we directly call of_get_cpu_hwid() instead of calling riscv_of_processor_hartid(). Fixes: ad635e723e17 ("riscv: cpu: Add 64bit hartid support on RV64") Signed-off-by: Anup Patel Reviewed-by: Atish Patra Link: https://lore.kernel.org/r/20231027154254.355853-2-apatel@ventanamicro.com Signed-off-by: Palmer Dabbelt --- arch/riscv/kernel/cpu.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c index c17dacb1141c..157ace8b262c 100644 --- a/arch/riscv/kernel/cpu.c +++ b/arch/riscv/kernel/cpu.c @@ -125,13 +125,14 @@ old_interface: */ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid) { - int rc; - for (; node; node = node->parent) { if (of_device_is_compatible(node, "riscv")) { - rc = riscv_of_processor_hartid(node, hartid); - if (!rc) - return 0; + *hartid = (unsigned long)of_get_cpu_hwid(node, 0); + if (*hartid == ~0UL) { + pr_warn("Found CPU without hart ID\n"); + return -ENODEV; + } + return 0; } } From c5e4ce9db635fef5ebffdfba1e7d80710474b176 Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Fri, 27 Oct 2023 21:12:54 +0530 Subject: [PATCH 47/51] of: property: Add fw_devlink support for msi-parent This allows fw_devlink to create device links between consumers of a MSI and the supplier of the MSI. 
Signed-off-by: Anup Patel Acked-by: Rob Herring Reviewed-by: Saravana Kannan Link: https://lore.kernel.org/r/20231027154254.355853-3-apatel@ventanamicro.com Signed-off-by: Palmer Dabbelt --- drivers/of/property.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/of/property.c b/drivers/of/property.c index cf8dacf3e3b8..afdaefbd03f6 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -1267,6 +1267,7 @@ DEFINE_SIMPLE_PROP(resets, "resets", "#reset-cells") DEFINE_SIMPLE_PROP(leds, "leds", NULL) DEFINE_SIMPLE_PROP(backlight, "backlight", NULL) DEFINE_SIMPLE_PROP(panel, "panel", NULL) +DEFINE_SIMPLE_PROP(msi_parent, "msi-parent", "#msi-cells") DEFINE_SUFFIX_PROP(regulators, "-supply", NULL) DEFINE_SUFFIX_PROP(gpio, "-gpio", "#gpio-cells") @@ -1356,6 +1357,7 @@ static const struct supplier_bindings of_supplier_bindings[] = { { .parse_prop = parse_leds, }, { .parse_prop = parse_backlight, }, { .parse_prop = parse_panel, }, + { .parse_prop = parse_msi_parent, }, { .parse_prop = parse_gpio_compat, }, { .parse_prop = parse_interrupts, }, { .parse_prop = parse_regulators, }, From c6e316ac05532febb0c966fa9b55f5258ed037be Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Thu, 9 Nov 2023 09:21:28 +0100 Subject: [PATCH 48/51] drivers: perf: Check find_first_bit() return value We must check the return value of find_first_bit() before using the return value as an index array since it happens to overflow the array and then panic: [ 107.318430] Kernel BUG [#1] [ 107.319434] CPU: 3 PID: 1238 Comm: kill Tainted: G E 6.6.0-rc6ubuntu-defconfig #2 [ 107.319465] Hardware name: riscv-virtio,qemu (DT) [ 107.319551] epc : pmu_sbi_ovf_handler+0x3a4/0x3ae [ 107.319840] ra : pmu_sbi_ovf_handler+0x52/0x3ae [ 107.319868] epc : ffffffff80a0a77c ra : ffffffff80a0a42a sp : ffffaf83fecda350 [ 107.319884] gp : ffffffff823961a8 tp : ffffaf8083db1dc0 t0 : ffffaf83fecda480 [ 107.319899] t1 : ffffffff80cafe62 t2 : 000000000000ff00 s0 : ffffaf83fecda520 [ 107.319921] s1 : ffffaf83fecda380 a0 : 00000018fca29df0 a1 : ffffffffffffffff [ 107.319936] a2 : 0000000001073734 a3 : 0000000000000004 a4 : 0000000000000000 [ 107.319951] a5 : 0000000000000040 a6 : 000000001d1c8774 a7 : 0000000000504d55 [ 107.319965] s2 : ffffffff82451f10 s3 : ffffffff82724e70 s4 : 000000000000003f [ 107.319980] s5 : 0000000000000011 s6 : ffffaf8083db27c0 s7 : 0000000000000000 [ 107.319995] s8 : 0000000000000001 s9 : 00007fffb45d6558 s10: 00007fffb45d81a0 [ 107.320009] s11: ffffaf7ffff60000 t3 : 0000000000000004 t4 : 0000000000000000 [ 107.320023] t5 : ffffaf7f80000000 t6 : ffffaf8000000000 [ 107.320037] status: 0000000200000100 badaddr: 0000000000000000 cause: 0000000000000003 [ 107.320081] [] pmu_sbi_ovf_handler+0x3a4/0x3ae [ 107.320112] [] handle_percpu_devid_irq+0x9e/0x1a0 [ 107.320131] [] generic_handle_domain_irq+0x28/0x36 [ 107.320148] [] riscv_intc_irq+0x36/0x4e [ 107.320166] [] handle_riscv_irq+0x54/0x86 [ 107.320189] [] do_irq+0x64/0x96 [ 107.320271] Code: 85a6 855e b097 ff7f 80e7 9220 b709 9002 4501 bbd9 (9002) 6097 [ 107.320585] ---[ end trace 0000000000000000 ]--- [ 107.320704] Kernel panic - not syncing: Fatal exception in interrupt [ 107.320775] SMP: stopping secondary CPUs [ 107.321219] Kernel Offset: 0x0 from 0xffffffff80000000 [ 107.333051] ---[ end Kernel panic - not syncing: Fatal exception in interrupt ]--- Fixes: 4905ec2fb7e6 ("RISC-V: Add sscofpmf extension support") Signed-off-by: Alexandre Ghiti Link: https://lore.kernel.org/r/20231109082128.40777-1-alexghiti@rivosinc.com Cc: stable@vger.kernel.org 
Signed-off-by: Palmer Dabbelt --- drivers/perf/riscv_pmu_sbi.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 9a51053b1f99..0b5053152ee6 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -685,6 +685,11 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev) /* Firmware counter don't support overflow yet */ fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); + if (fidx == RISCV_MAX_COUNTERS) { + csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); + return IRQ_NONE; + } + event = cpu_hw_evt->events[fidx]; if (!event) { csr_clear(CSR_SIP, BIT(riscv_pmu_irq_num)); From 61e3d993c8bd3e80f8f1363ed5e04f88ab531b72 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Thu, 26 Oct 2023 10:40:10 +0200 Subject: [PATCH 49/51] drivers: perf: Do not broadcast to other cpus when starting a counter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This command: $ perf record -e cycles:k -e instructions:k -c 10000 -m 64M dd if=/dev/zero of=/dev/null count=1000 gives rise to this kernel warning: [ 444.364395] WARNING: CPU: 0 PID: 104 at kernel/smp.c:775 smp_call_function_many_cond+0x42c/0x436 [ 444.364515] Modules linked in: [ 444.364657] CPU: 0 PID: 104 Comm: perf-exec Not tainted 6.6.0-rc6-00051-g391df82e8ec3-dirty #73 [ 444.364771] Hardware name: riscv-virtio,qemu (DT) [ 444.364868] epc : smp_call_function_many_cond+0x42c/0x436 [ 444.364917] ra : on_each_cpu_cond_mask+0x20/0x32 [ 444.364948] epc : ffffffff8009f9e0 ra : ffffffff8009fa5a sp : ff20000000003800 [ 444.364966] gp : ffffffff81500aa0 tp : ff60000002b83000 t0 : ff200000000038c0 [ 444.364982] t1 : ffffffff815021f0 t2 : 000000000000001f s0 : ff200000000038b0 [ 444.364998] s1 : ff60000002c54d98 a0 : ff60000002a73940 a1 : 0000000000000000 [ 444.365013] a2 : 0000000000000000 a3 : 0000000000000003 a4 : 0000000000000100 [ 444.365029] a5 : 0000000000010100 a6 : 0000000000f00000 a7 : 0000000000000000 [ 444.365044] s2 : 0000000000000000 s3 : ffffffffffffffff s4 : ff60000002c54d98 [ 444.365060] s5 : ffffffff81539610 s6 : ffffffff80c20c48 s7 : 0000000000000000 [ 444.365075] s8 : 0000000000000000 s9 : 0000000000000001 s10: 0000000000000001 [ 444.365090] s11: ffffffff80099394 t3 : 0000000000000003 t4 : 00000000eac0c6e6 [ 444.365104] t5 : 0000000400000000 t6 : ff60000002e010d0 [ 444.365120] status: 0000000200000100 badaddr: 0000000000000000 cause: 0000000000000003 [ 444.365226] [] smp_call_function_many_cond+0x42c/0x436 [ 444.365295] [] on_each_cpu_cond_mask+0x20/0x32 [ 444.365311] [] pmu_sbi_ctr_start+0x7a/0xaa [ 444.365327] [] riscv_pmu_start+0x48/0x66 [ 444.365339] [] perf_adjust_freq_unthr_context+0x196/0x1ac [ 444.365356] [] perf_event_task_tick+0x78/0x8c [ 444.365368] [] scheduler_tick+0xe6/0x25e [ 444.365383] [] update_process_times+0x80/0x96 [ 444.365398] [] tick_sched_handle+0x26/0x52 [ 444.365410] [] tick_sched_timer+0x50/0x98 [ 444.365422] [] __hrtimer_run_queues+0x126/0x18a [ 444.365433] [] hrtimer_interrupt+0xce/0x1da [ 444.365444] [] riscv_timer_interrupt+0x30/0x3a [ 444.365457] [] handle_percpu_devid_irq+0x80/0x114 [ 444.365470] [] generic_handle_domain_irq+0x1c/0x2a [ 444.365483] [] riscv_intc_irq+0x2e/0x46 [ 444.365497] [] handle_riscv_irq+0x4a/0x74 [ 444.365521] [] do_irq+0x7c/0x7e [ 444.365796] ---[ end trace 0000000000000000 ]--- That's because the fix in commit 3fec323339a4 ("drivers: perf: Fix panic in riscv SBI mmap support") was wrong since there is no need to broadcast to other cpus 
when starting a counter, that's only needed in mmap when the counters could have already been started on other cpus, so simply remove this broadcast. Fixes: 3fec323339a4 ("drivers: perf: Fix panic in riscv SBI mmap support") Signed-off-by: Alexandre Ghiti Tested-by: Clément Léger Tested-by: Yu Chien Peter Lin Tested-by: Lad Prabhakar #On Link: https://lore.kernel.org/r/20231026084010.11888-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt --- drivers/perf/riscv_pmu_sbi.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index 96c7f670c8f0..fcb0c70ca222 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -543,8 +543,7 @@ static void pmu_sbi_ctr_start(struct perf_event *event, u64 ival) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_set_scounteren, (void *)event, 1); + pmu_sbi_set_scounteren((void *)event); } static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) @@ -554,8 +553,7 @@ static void pmu_sbi_ctr_stop(struct perf_event *event, unsigned long flag) if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) - on_each_cpu_mask(mm_cpumask(event->owner->mm), - pmu_sbi_reset_scounteren, (void *)event, 1); + pmu_sbi_reset_scounteren((void *)event); ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); if (ret.error && (ret.error != SBI_ERR_ALREADY_STOPPED) && From e72c4333d2f2e7f2200f71a88c0480fd2a769a64 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 31 Oct 2023 14:45:52 +0800 Subject: [PATCH 50/51] riscv: Rearrange hwcap.h and cpufeature.h Now hwcap.h and cpufeature.h are mutually including each other, and most of the variable/API declarations in hwcap.h are implemented in cpufeature.c, so, it's better to move them into cpufeature.h and leave only macros for ISA extension logical IDs in hwcap.h. BTW, the riscv_isa_extension_mask macro is not used now, so this patch removes it. 
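
For in-tree users the practical effect is just an include change. The fragment below is a hedged sketch of a typical caller after this patch: riscv_has_extension_likely() and RISCV_ISA_EXT_ZBB come from the headers touched here, while the surrounding function is invented for illustration and is not meant to build standalone.

  #include <linux/printk.h>
  #include <asm/cpufeature.h>	/* was <asm/hwcap.h> before this patch */

  static void example_zbb_probe(void)
  {
          /* Same API as before, now declared in cpufeature.h; the
           * RISCV_ISA_EXT_* IDs still live in hwcap.h, which
           * cpufeature.h pulls in. */
          if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
                  pr_info("Zbb available, using optimized path\n");
  }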
Suggested-by: Andrew Jones Signed-off-by: Xiao Wang Reviewed-by: Andrew Jones Link: https://lore.kernel.org/r/20231031064553.2319688-2-xiao.w.wang@intel.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/cpufeature.h | 83 ++++++++++++++++++++++++++ arch/riscv/include/asm/elf.h | 2 +- arch/riscv/include/asm/hwcap.h | 91 ----------------------------- arch/riscv/include/asm/pgtable.h | 1 + arch/riscv/include/asm/switch_to.h | 2 +- arch/riscv/include/asm/vector.h | 2 +- arch/riscv/kvm/aia.c | 2 +- arch/riscv/kvm/main.c | 2 +- arch/riscv/kvm/tlb.c | 2 +- arch/riscv/kvm/vcpu_fp.c | 2 +- arch/riscv/kvm/vcpu_onereg.c | 2 +- arch/riscv/kvm/vcpu_vector.c | 2 +- drivers/clocksource/timer-riscv.c | 2 +- drivers/perf/riscv_pmu_sbi.c | 2 +- 14 files changed, 95 insertions(+), 102 deletions(-) diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index 69f2cae96f0b..a418c3112cd6 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -7,7 +7,10 @@ #define _ASM_CPUFEATURE_H #include +#include #include +#include +#include /* * These are probed via a device_initcall(), via either the SBI or directly @@ -50,4 +53,84 @@ static inline bool check_unaligned_access_emulated(int cpu) static inline void unaligned_emulation_finish(void) {} #endif +unsigned long riscv_get_elf_hwcap(void); + +struct riscv_isa_ext_data { + const unsigned int id; + const char *name; + const char *property; +}; + +extern const struct riscv_isa_ext_data riscv_isa_ext[]; +extern const size_t riscv_isa_ext_count; +extern bool riscv_isa_fallback; + +unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + +static __always_inline bool +riscv_has_extension_likely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, + "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + asm_volatile_goto( + ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) + : + : [ext] "i" (ext) + : + : l_no); + } else { + if (!__riscv_isa_extension_available(NULL, ext)) + goto l_no; + } + + return true; +l_no: + return false; +} + +static __always_inline bool +riscv_has_extension_unlikely(const unsigned long ext) +{ + compiletime_assert(ext < RISCV_ISA_EXT_MAX, + "ext must be < RISCV_ISA_EXT_MAX"); + + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + asm_volatile_goto( + ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) + : + : [ext] "i" (ext) + : + : l_yes); + } else { + if (__riscv_isa_extension_available(NULL, ext)) + goto l_yes; + } + + return false; +l_yes: + return true; +} + +static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) +{ + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext)) + return true; + + return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); +} + +static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) +{ + if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext)) + return true; + + return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); +} + #endif diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h index b3b2dfbdf945..06c236bfab53 100644 --- a/arch/riscv/include/asm/elf.h +++ b/arch/riscv/include/asm/elf.h @@ -14,7 +14,7 @@ #include #include 
#include -#include +#include /* * These are used to set parameters in the core dumps. diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 31774bcdf1c6..141b7109c25c 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -8,9 +8,6 @@ #ifndef _ASM_RISCV_HWCAP_H #define _ASM_RISCV_HWCAP_H -#include -#include -#include #include #define RISCV_ISA_EXT_a ('a' - 'a') @@ -67,92 +64,4 @@ #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA #endif -#ifndef __ASSEMBLY__ - -#include -#include - -unsigned long riscv_get_elf_hwcap(void); - -struct riscv_isa_ext_data { - const unsigned int id; - const char *name; - const char *property; -}; - -extern const struct riscv_isa_ext_data riscv_isa_ext[]; -extern const size_t riscv_isa_ext_count; -extern bool riscv_isa_fallback; - -unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); - -#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) - -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); -#define riscv_isa_extension_available(isa_bitmap, ext) \ - __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) - -static __always_inline bool -riscv_has_extension_likely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, - "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { - asm_volatile_goto( - ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1) - : - : [ext] "i" (ext) - : - : l_no); - } else { - if (!__riscv_isa_extension_available(NULL, ext)) - goto l_no; - } - - return true; -l_no: - return false; -} - -static __always_inline bool -riscv_has_extension_unlikely(const unsigned long ext) -{ - compiletime_assert(ext < RISCV_ISA_EXT_MAX, - "ext must be < RISCV_ISA_EXT_MAX"); - - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { - asm_volatile_goto( - ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1) - : - : [ext] "i" (ext) - : - : l_yes); - } else { - if (__riscv_isa_extension_available(NULL, ext)) - goto l_yes; - } - - return false; -l_yes: - return true; -} - -static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext) -{ - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext)) - return true; - - return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); -} - -static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext) -{ - if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext)) - return true; - - return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); -} -#endif - #endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index c8e8867c42f6..294044429e8e 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -291,6 +291,7 @@ static inline pte_t pud_pte(pud_t pud) } #ifdef CONFIG_RISCV_ISA_SVNAPOT +#include static __always_inline bool has_svnapot(void) { diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index a727be723c56..f90d8e42f3c7 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -9,7 +9,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index c5ee07b3df07..87aaef656257 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -15,7 +15,7 @@ #include #include #include -#include 
+#include #include #include diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c index 74bb27440527..a944294f6f23 100644 --- a/arch/riscv/kvm/aia.c +++ b/arch/riscv/kvm/aia.c @@ -14,7 +14,7 @@ #include #include #include -#include +#include #include struct aia_hgei_control { diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c index 48ae0d4b3932..225a435d9c9a 100644 --- a/arch/riscv/kvm/main.c +++ b/arch/riscv/kvm/main.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include long kvm_arch_dev_ioctl(struct file *filp, diff --git a/arch/riscv/kvm/tlb.c b/arch/riscv/kvm/tlb.c index 44bc324aeeb0..23c0e82b5103 100644 --- a/arch/riscv/kvm/tlb.c +++ b/arch/riscv/kvm/tlb.c @@ -12,7 +12,7 @@ #include #include #include -#include +#include #include #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL) diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c index 08ba48a395aa..030904d82b58 100644 --- a/arch/riscv/kvm/vcpu_fp.c +++ b/arch/riscv/kvm/vcpu_fp.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #ifdef CONFIG_FPU void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index 1b7e9fa265cb..b03e0c879dab 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -13,7 +13,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c index b430cbb69521..b339a2682f25 100644 --- a/arch/riscv/kvm/vcpu_vector.c +++ b/arch/riscv/kvm/vcpu_vector.c @@ -11,7 +11,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/clocksource/timer-riscv.c b/drivers/clocksource/timer-riscv.c index 9c8f3e2decc2..e0333142c18c 100644 --- a/drivers/clocksource/timer-riscv.c +++ b/drivers/clocksource/timer-riscv.c @@ -25,7 +25,7 @@ #include #include #include -#include +#include #include #include diff --git a/drivers/perf/riscv_pmu_sbi.c b/drivers/perf/riscv_pmu_sbi.c index cd8a2b9efd78..16acd4dcdb96 100644 --- a/drivers/perf/riscv_pmu_sbi.c +++ b/drivers/perf/riscv_pmu_sbi.c @@ -22,7 +22,7 @@ #include #include -#include +#include #define SYSCTL_NO_USER_ACCESS 0 #define SYSCTL_USER_ACCESS 1 From 457926b253200bd9bdfae9a016a3b1d1dc661d55 Mon Sep 17 00:00:00 2001 From: Xiao Wang Date: Tue, 31 Oct 2023 14:45:53 +0800 Subject: [PATCH 51/51] riscv: Optimize bitops with Zbb extension This patch leverages the alternative mechanism to dynamically optimize bitops (including __ffs, __fls, ffs, fls) with Zbb instructions. When Zbb ext is not supported by the runtime CPU, legacy implementation is used. If Zbb is supported, then the optimized variants will be selected via alternative patching. The legacy bitops support is taken from the generic C implementation as fallback. If the parameter is a build-time constant, we leverage compiler builtin to calculate the result directly, this approach is inspired by x86 bitops implementation. EFI stub runs before the kernel, so alternative mechanism should not be used there, this patch introduces a macro NO_ALTERNATIVE for this purpose. 
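
The constant-folding part of this is easy to check outside the kernel. Below is a minimal userspace sketch of that dispatch only; the names ffs_demo and variable_ffs_demo are invented, and a userspace demo cannot show the alternatives patching that selects between the Zbb instruction and the legacy loop at runtime.

  #include <stdio.h>

  /* Runtime fallback, mirroring the shape of the kernel's legacy (non-Zbb)
   * __ffs: returns the 0-based index of the first set bit, undefined for 0. */
  static inline unsigned int variable_ffs_demo(unsigned long long word)
  {
          unsigned int num = 0;

          if ((word & 0xffffffffULL) == 0) { num += 32; word >>= 32; }
          if ((word & 0xffffULL) == 0)     { num += 16; word >>= 16; }
          if ((word & 0xffULL) == 0)       { num += 8;  word >>= 8;  }
          if ((word & 0xfULL) == 0)        { num += 4;  word >>= 4;  }
          if ((word & 0x3ULL) == 0)        { num += 2;  word >>= 2;  }
          if ((word & 0x1ULL) == 0)        { num += 1; }
          return num;
  }

  /* Same dispatch trick as the patch: compile-time constants fold via the
   * compiler builtin, variables go through the runtime helper (which, in
   * the kernel, is the alternatives-patched Zbb or legacy code). */
  #define ffs_demo(word) \
          (__builtin_constant_p(word) ? \
           (unsigned int)__builtin_ctzll(word) : \
           variable_ffs_demo(word))

  int main(void)
  {
          unsigned long long x = 0x100;

          printf("%u %u\n", ffs_demo(0x8000ULL), ffs_demo(x)); /* prints: 15 8 */
          return 0;
  }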
Signed-off-by: Xiao Wang Reviewed-by: Charlie Jenkins Link: https://lore.kernel.org/r/20231031064553.2319688-3-xiao.w.wang@intel.com Signed-off-by: Palmer Dabbelt --- arch/riscv/include/asm/bitops.h | 254 +++++++++++++++++++++++++- drivers/firmware/efi/libstub/Makefile | 2 +- 2 files changed, 252 insertions(+), 4 deletions(-) diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index 3540b690944b..b212c2708cda 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -15,13 +15,261 @@ #include #include +#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) #include -#include -#include #include +#include +#include + +#else +#include +#include + +#if (BITS_PER_LONG == 64) +#define CTZW "ctzw " +#define CLZW "clzw " +#elif (BITS_PER_LONG == 32) +#define CTZW "ctz " +#define CLZW "clz " +#else +#error "Unexpected BITS_PER_LONG" +#endif + +static __always_inline unsigned long variable__ffs(unsigned long word) +{ + int num; + + asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + "ctz %0, %1\n" + ".option pop\n" + : "=r" (word) : "r" (word) :); + + return word; + +legacy: + num = 0; +#if BITS_PER_LONG == 64 + if ((word & 0xffffffff) == 0) { + num += 32; + word >>= 32; + } +#endif + if ((word & 0xffff) == 0) { + num += 16; + word >>= 16; + } + if ((word & 0xff) == 0) { + num += 8; + word >>= 8; + } + if ((word & 0xf) == 0) { + num += 4; + word >>= 4; + } + if ((word & 0x3) == 0) { + num += 2; + word >>= 2; + } + if ((word & 0x1) == 0) + num += 1; + return num; +} + +/** + * __ffs - find first set bit in a long word + * @word: The word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +#define __ffs(word) \ + (__builtin_constant_p(word) ? \ + (unsigned long)__builtin_ctzl(word) : \ + variable__ffs(word)) + +static __always_inline unsigned long variable__fls(unsigned long word) +{ + int num; + + asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + "clz %0, %1\n" + ".option pop\n" + : "=r" (word) : "r" (word) :); + + return BITS_PER_LONG - 1 - word; + +legacy: + num = BITS_PER_LONG - 1; +#if BITS_PER_LONG == 64 + if (!(word & (~0ul << 32))) { + num -= 32; + word <<= 32; + } +#endif + if (!(word & (~0ul << (BITS_PER_LONG - 16)))) { + num -= 16; + word <<= 16; + } + if (!(word & (~0ul << (BITS_PER_LONG - 8)))) { + num -= 8; + word <<= 8; + } + if (!(word & (~0ul << (BITS_PER_LONG - 4)))) { + num -= 4; + word <<= 4; + } + if (!(word & (~0ul << (BITS_PER_LONG - 2)))) { + num -= 2; + word <<= 2; + } + if (!(word & (~0ul << (BITS_PER_LONG - 1)))) + num -= 1; + return num; +} + +/** + * __fls - find last set bit in a long word + * @word: the word to search + * + * Undefined if no set bit exists, so code should check against 0 first. + */ +#define __fls(word) \ + (__builtin_constant_p(word) ? 
\ + (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) : \ + variable__fls(word)) + +static __always_inline int variable_ffs(int x) +{ + int r; + + if (!x) + return 0; + + asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + CTZW "%0, %1\n" + ".option pop\n" + : "=r" (r) : "r" (x) :); + + return r + 1; + +legacy: + r = 1; + if (!(x & 0xffff)) { + x >>= 16; + r += 16; + } + if (!(x & 0xff)) { + x >>= 8; + r += 8; + } + if (!(x & 0xf)) { + x >>= 4; + r += 4; + } + if (!(x & 3)) { + x >>= 2; + r += 2; + } + if (!(x & 1)) { + x >>= 1; + r += 1; + } + return r; +} + +/** + * ffs - find first set bit in a word + * @x: the word to search + * + * This is defined the same way as the libc and compiler builtin ffs routines. + * + * ffs(value) returns 0 if value is 0 or the position of the first set bit if + * value is nonzero. The first (least significant) bit is at position 1. + */ +#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x)) + +static __always_inline int variable_fls(unsigned int x) +{ + int r; + + if (!x) + return 0; + + asm_volatile_goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm volatile (".option push\n" + ".option arch,+zbb\n" + CLZW "%0, %1\n" + ".option pop\n" + : "=r" (r) : "r" (x) :); + + return 32 - r; + +legacy: + r = 32; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +/** + * fls - find last set bit in a word + * @x: the word to search + * + * This is defined in a similar way as ffs, but returns the position of the most + * significant set bit. + * + * fls(value) returns 0 if value is 0 or the position of the last set bit if + * value is nonzero. The last (most significant) bit is at position 32. + */ +#define fls(x) \ +({ \ + typeof(x) x_ = (x); \ + __builtin_constant_p(x_) ? \ + (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0) \ + : \ + variable_fls(x_); \ +}) + +#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */ + +#include #include #include -#include #include diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index a1157c2a7170..d68cacd4e3af 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -28,7 +28,7 @@ cflags-$(CONFIG_ARM) += -DEFI_HAVE_STRLEN -DEFI_HAVE_STRNLEN \ -DEFI_HAVE_MEMCHR -DEFI_HAVE_STRRCHR \ -DEFI_HAVE_STRCMP -fno-builtin -fpic \ $(call cc-option,-mno-single-pic-base) -cflags-$(CONFIG_RISCV) += -fpic +cflags-$(CONFIG_RISCV) += -fpic -DNO_ALTERNATIVE cflags-$(CONFIG_LOONGARCH) += -fpie cflags-$(CONFIG_EFI_PARAMS_FROM_FDT) += -I$(srctree)/scripts/dtc/libfdt
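
As a quick way to check the conventions the new bitops.h relies on (ffs() and fls() are 1-based and return 0 for a zero argument, while __ffs() and __fls() are 0-based and undefined for zero), the short userspace sketch below mirrors those semantics and the __builtin_constant_p() dispatch with plain compiler builtins. It is only an illustration under that assumption: it does not touch Zbb instructions or the kernel's alternative patching, and every name in it (ffs_like(), my_ffs(), and so on) is hypothetical rather than part of the series.

/*
 * Standalone sketch (hypothetical, not part of the patch): mirrors the
 * documented return-value conventions and the constant-folding dispatch
 * using compiler builtins, so they can be checked with any C compiler.
 */
#include <assert.h>
#include <limits.h>

/* ffs(): 1-based index of the least significant set bit, 0 for 0. */
static int ffs_like(unsigned int x)
{
	return x ? __builtin_ctz(x) + 1 : 0;
}

/* fls(): 1-based index of the most significant set bit, 0 for 0. */
static int fls_like(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

/* __ffs()/__fls(): 0-based indices, undefined for 0, so callers check first. */
static unsigned long ffs0_like(unsigned long w)
{
	return __builtin_ctzl(w);
}

static unsigned long fls0_like(unsigned long w)
{
	return sizeof(long) * CHAR_BIT - 1 - __builtin_clzl(w);
}

/*
 * Mirrors the header's __builtin_constant_p() trick: a compile-time
 * constant never reaches the out-of-line helper, e.g. my_ffs(0x10)
 * folds to 5 at compile time.
 */
#define my_ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : ffs_like(x))

int main(void)
{
	assert(ffs_like(0) == 0 && ffs_like(1) == 1);
	assert(fls_like(0) == 0 && fls_like(0x80000000u) == 32);
	assert(ffs0_like(0x8UL) == 3 && fls0_like(0x8UL) == 3);
	assert(my_ffs(0x10) == 5);
	return 0;
}

In-kernel callers keep using __ffs()/ffs()/__fls()/fls() from linux/bitops.h unchanged; whether the Zbb path or the legacy shift-based path runs is decided at boot by the alternative patching shown in the diff above.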