Mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-11-28 22:54:05 +08:00)
Merge patch series "Add support to handle misaligned accesses in S-mode"
Clément Léger <cleger@rivosinc.com> says:

Since commit 61cadb9 ("Provide new description of misaligned load/store behavior compatible with privileged architecture.") in the RISC-V ISA manual, it is stated that misaligned load/store might not be supported. However, the RISC-V kernel uABI describes that misaligned accesses are supported. In order to support that, this series adds support for S-mode handling of misaligned accesses as well as support for prctl(PR_UNALIGN).

Handling misaligned accesses in the kernel allows for finer-grained control of the misaligned access behavior, and, thanks to the prctl() call, misaligned access emulation can be disabled so that such accesses generate SIGBUS. User space can then optimize its software by removing such accesses based on the SIGBUS generation.

This series is useful when using an SBI implementation that does not handle misaligned traps, as well as for detecting misaligned accesses generated by a userspace application using the prctl(PR_SET_UNALIGN) feature.

This series can be tested using the spike simulator [1] and a modified openSBI version [2] which allows to always delegate misaligned load/store traps to S-mode. A test [3] that exercises various instructions/registers can be executed to verify the unaligned access support.

[1] https://github.com/riscv-software-src/riscv-isa-sim
[2] https://github.com/rivosinc/opensbi/tree/dev/cleger/no_misaligned
[3] https://github.com/clementleger/unaligned_test

* b4-shazam-merge:
  riscv: add support for PR_SET_UNALIGN and PR_GET_UNALIGN
  riscv: report misaligned accesses emulation to hwprobe
  riscv: annotate check_unaligned_access_boot_cpu() with __init
  riscv: add support for sysctl unaligned_enabled control
  riscv: add floating point insn support to misaligned access emulation
  riscv: report perf event for misaligned fault
  riscv: add support for misaligned trap handling in S-mode
  riscv: remove unused functions in traps_misaligned.c

Link: https://lore.kernel.org/r/20231004151405.521596-1-cleger@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
This commit is contained in:
commit 0619ff9f02
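
For orientation before the diff: the series reuses the generic prctl(PR_SET_UNALIGN)/prctl(PR_GET_UNALIGN) interface so a task can ask for SIGBUS delivery instead of in-kernel emulation when it performs a misaligned access. A minimal userspace sketch of that flow follows; it is an illustration written for this page, not code from the commit, and it assumes a RISC-V kernel built with CONFIG_RISCV_MISALIGNED=y whose CPUs all report emulated misaligned accesses.

/*
 * Illustration only: by default (with this series) the kernel emulates the
 * misaligned load below; after PR_SET_UNALIGN with PR_UNALIGN_SIGBUS the
 * access is reported as SIGBUS instead, so it can be found and removed.
 */
#include <stdint.h>
#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
	uint64_t storage[2] = { 0x1122334455667788ULL, 0 };
	/* storage is 8-byte aligned, so storage + 1 byte is misaligned for a 32-bit load */
	volatile uint32_t *p = (volatile uint32_t *)((char *)storage + 1);

	/* Fails with EINVAL unless misaligned accesses are emulated on all CPUs. */
	if (prctl(PR_SET_UNALIGN, PR_UNALIGN_SIGBUS) != 0) {
		perror("PR_SET_UNALIGN");
		return 1;
	}

	printf("0x%x\n", *p);	/* misaligned load: now delivered as SIGBUS */
	return 0;
}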
@@ -643,6 +643,15 @@ config THREAD_SIZE_ORDER
	  Specify the Pages of thread stack size (from 4KB to 64KB), which also
	  affects irq stack size, which is equal to thread stack size.

config RISCV_MISALIGNED
	bool "Support misaligned load/store traps for kernel and userspace"
	select SYSCTL_ARCH_UNALIGN_ALLOW
	default y
	help
	  Say Y here if you want the kernel to embed support for misaligned
	  load/store for both kernel and userspace. When disabled, misaligned
	  accesses will generate SIGBUS in userspace and panic in kernel.

endmenu # "Platform type"

menu "Kernel features"
@@ -33,4 +33,22 @@ extern struct riscv_isainfo hart_isa[NR_CPUS];
void check_unaligned_access(int cpu);
void riscv_user_isa_enable(void);

#ifdef CONFIG_RISCV_MISALIGNED
bool unaligned_ctl_available(void);
bool check_unaligned_access_emulated(int cpu);
void unaligned_emulation_finish(void);
#else
static inline bool unaligned_ctl_available(void)
{
	return false;
}

static inline bool check_unaligned_access_emulated(int cpu)
{
	return false;
}

static inline void unaligned_emulation_finish(void) {}
#endif

#endif
@@ -8,4 +8,18 @@
void handle_page_fault(struct pt_regs *regs);
void handle_break(struct pt_regs *regs);

#ifdef CONFIG_RISCV_MISALIGNED
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
#else
static inline int handle_misaligned_load(struct pt_regs *regs)
{
	return -1;
}
static inline int handle_misaligned_store(struct pt_regs *regs)
{
	return -1;
}
#endif

#endif /* _ASM_RISCV_ENTRY_COMMON_H */
@@ -8,6 +8,7 @@

#include <linux/const.h>
#include <linux/cache.h>
#include <linux/prctl.h>

#include <vdso/processor.h>

@@ -82,6 +83,7 @@ struct thread_struct {
	unsigned long bad_cause;
	unsigned long vstate_ctrl;
	struct __riscv_v_ext_state vstate;
	unsigned long align_ctl;
};

/* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,

#define INIT_THREAD {					\
	.sp		= sizeof(init_stack) + (long)&init_stack,	\
	.align_ctl	= PR_UNALIGN_NOPRINT,		\
}

#define task_pt_regs(tsk)				\
@@ -134,6 +137,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
extern long riscv_v_vstate_ctrl_get_current(void);
#endif /* CONFIG_RISCV_ISA_V */

extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);

#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_RISCV_PROCESSOR_H */
@@ -59,7 +59,7 @@ obj-y += patch.o
obj-y	+= probes/
obj-$(CONFIG_MMU)	+= vdso.o vdso/

obj-$(CONFIG_RISCV_M_MODE)	+= traps_misaligned.o
obj-$(CONFIG_RISCV_MISALIGNED)	+= traps_misaligned.o
obj-$(CONFIG_FPU)		+= fpu.o
obj-$(CONFIG_RISCV_ISA_V)	+= vector.o
obj-$(CONFIG_SMP)		+= smpboot.o
@@ -569,6 +569,9 @@ void check_unaligned_access(int cpu)
	void *src;
	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;

	if (check_unaligned_access_emulated(cpu))
		return;

	page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
	if (!page) {
		pr_warn("Can't alloc pages to measure memcpy performance");
@@ -646,9 +649,10 @@ out:
	__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
}

static int check_unaligned_access_boot_cpu(void)
static int __init check_unaligned_access_boot_cpu(void)
{
	check_unaligned_access(0);
	unaligned_emulation_finish();
	return 0;
}
@@ -104,3 +104,124 @@ ENTRY(__fstate_restore)
	csrc CSR_STATUS, t1
	ret
ENDPROC(__fstate_restore)

#define get_f32(which)	fmv.x.s a0, which; j 2f
#define put_f32(which)	fmv.s.x which, a1; j 2f
#if __riscv_xlen == 64
# define get_f64(which)	fmv.x.d a0, which; j 2f
# define put_f64(which)	fmv.d.x which, a1; j 2f
#else
# define get_f64(which)	fsd which, 0(a1); j 2f
# define put_f64(which)	fld which, 0(a1); j 2f
#endif

.macro fp_access_prologue
	/*
	 * Compute jump offset to store the correct FP register since we don't
	 * have indirect FP register access
	 */
	sll t0, a0, 3
	la t2, 1f
	add t0, t0, t2
	li t1, SR_FS
	csrs CSR_STATUS, t1
	jr t0
1:
.endm

.macro fp_access_epilogue
2:
	csrc CSR_STATUS, t1
	ret
.endm

#define fp_access_body(__access_func) \
	__access_func(f0); \
	__access_func(f1); \
	__access_func(f2); \
	__access_func(f3); \
	__access_func(f4); \
	__access_func(f5); \
	__access_func(f6); \
	__access_func(f7); \
	__access_func(f8); \
	__access_func(f9); \
	__access_func(f10); \
	__access_func(f11); \
	__access_func(f12); \
	__access_func(f13); \
	__access_func(f14); \
	__access_func(f15); \
	__access_func(f16); \
	__access_func(f17); \
	__access_func(f18); \
	__access_func(f19); \
	__access_func(f20); \
	__access_func(f21); \
	__access_func(f22); \
	__access_func(f23); \
	__access_func(f24); \
	__access_func(f25); \
	__access_func(f26); \
	__access_func(f27); \
	__access_func(f28); \
	__access_func(f29); \
	__access_func(f30); \
	__access_func(f31)


#ifdef CONFIG_RISCV_MISALIGNED

/*
 * Disable compressed instructions set to keep a constant offset between FP
 * load/store/move instructions
 */
.option norvc
/*
 * put_f32_reg - Set a FP register from a register containing the value
 * a0 = FP register index to be set
 * a1 = value to be loaded in the FP register
 */
SYM_FUNC_START(put_f32_reg)
	fp_access_prologue
	fp_access_body(put_f32)
	fp_access_epilogue
SYM_FUNC_END(put_f32_reg)

/*
 * get_f32_reg - Get a FP register value and return it
 * a0 = FP register index to be retrieved
 */
SYM_FUNC_START(get_f32_reg)
	fp_access_prologue
	fp_access_body(get_f32)
	fp_access_epilogue
SYM_FUNC_END(get_f32_reg)

/*
 * put_f64_reg - Set a 64 bits FP register from a value or a pointer.
 * a0 = FP register index to be set
 * a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits, we
 * load the value to a pointer).
 */
SYM_FUNC_START(put_f64_reg)
	fp_access_prologue
	fp_access_body(put_f64)
	fp_access_epilogue
SYM_FUNC_END(put_f64_reg)

/*
 * get_f64_reg - Get a 64 bits FP register value and return it or store it to
 * a pointer.
 * a0 = FP register index to be retrieved
 * a1 = If xlen == 32, pointer which should be loaded with the FP register value
 *	or unused if xlen == 64. In which case the FP register value is returned
 *	through a0
 */
SYM_FUNC_START(get_f64_reg)
	fp_access_prologue
	fp_access_body(get_f64)
	fp_access_epilogue
SYM_FUNC_END(get_f64_reg)

#endif /* CONFIG_RISCV_MISALIGNED */
@@ -25,6 +25,7 @@
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>

register unsigned long gp_in_global __asm__("gp");

@@ -41,6 +42,23 @@ void arch_cpu_idle(void)
	cpu_do_idle();
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
}

void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);
@@ -247,8 +247,8 @@ asmlinkage __visible void smp_callin(void)
	riscv_ipi_enable();

	numa_add_cpu(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);
	check_unaligned_access(curr_cpuid);
	set_cpu_online(curr_cpuid, 1);

	if (has_vector()) {
		if (riscv_v_setup_vsize())
@@ -179,14 +179,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re

DO_ERROR_INFO(do_trap_load_fault,
	SIGSEGV, SEGV_ACCERR, "load access fault");
#ifndef CONFIG_RISCV_M_MODE
DO_ERROR_INFO(do_trap_load_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
DO_ERROR_INFO(do_trap_store_misaligned,
	SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
#else
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);

asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
@@ -229,7 +221,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
		irqentry_nmi_exit(regs, state);
	}
}
#endif
DO_ERROR_INFO(do_trap_store_fault,
	SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
@@ -6,12 +6,16 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>

#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
#include <asm/entry-common.h>
#include <asm/hwprobe.h>
#include <asm/cpufeature.h>

#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -151,53 +155,134 @@
#define PRECISION_S 0
#define PRECISION_D 1

#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
static inline type load_##type(const type *addr) \
{ \
	type val; \
	asm (#insn " %0, %1" \
	: "=&r" (val) : "m" (*addr)); \
	return val; \
#ifdef CONFIG_FPU

#define FP_GET_RD(insn)		(insn >> 7 & 0x1F)

extern void put_f32_reg(unsigned long fp_reg, unsigned long value);

static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
		      unsigned long val)
{
	unsigned long fp_reg = FP_GET_RD(insn);

	put_f32_reg(fp_reg, val);
	regs->status |= SR_FS_DIRTY;

	return 0;
}

#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
static inline void store_##type(type *addr, type val) \
{ \
	asm volatile (#insn " %0, %1\n" \
	: : "r" (val), "m" (*addr)); \
}
extern void put_f64_reg(unsigned long fp_reg, unsigned long value);

DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
#if defined(CONFIG_64BIT)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
{
	unsigned long fp_reg = FP_GET_RD(insn);
	unsigned long value;

#if __riscv_xlen == 32
	value = (unsigned long) &val;
#else
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
	value = val;
#endif
	put_f64_reg(fp_reg, value);
	regs->status |= SR_FS_DIRTY;

static inline u64 load_u64(const u64 *addr)
{
	return load_u32((u32 *)addr)
		+ ((u64)load_u32((u32 *)addr + 1) << 32);
	return 0;
}

static inline void store_u64(u64 *addr, u64 val)
#if __riscv_xlen == 32
extern void get_f64_reg(unsigned long fp_reg, u64 *value);

static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
		      struct pt_regs *regs)
{
	store_u32((u32 *)addr, val);
	store_u32((u32 *)addr + 1, val >> 32);
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	u64 val;

	get_f64_reg(fp_reg, &val);
	regs->status |= SR_FS_DIRTY;

	return val;
}
#else

extern unsigned long get_f64_reg(unsigned long fp_reg);

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f64_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#endif

static inline ulong get_insn(ulong mepc)
extern unsigned long get_f32_reg(unsigned long fp_reg);

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
	unsigned long val;

	val = get_f32_reg(fp_reg);
	regs->status |= SR_FS_DIRTY;

	return val;
}

#else /* CONFIG_FPU */
static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
		       unsigned long val) {}

static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}

static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
				struct pt_regs *regs)
{
	return 0;
}

#endif

#define GET_F64_RS2(insn, regs)		(get_f64_rs(insn, 20, regs))
#define GET_F64_RS2C(insn, regs)	(get_f64_rs(insn, 2, regs))
#define GET_F64_RS2S(insn, regs)	(get_f64_rs(RVC_RS2S(insn), 0, regs))

#define GET_F32_RS2(insn, regs)		(get_f32_rs(insn, 20, regs))
#define GET_F32_RS2C(insn, regs)	(get_f32_rs(insn, 2, regs))
#define GET_F32_RS2S(insn, regs)	(get_f32_rs(RVC_RS2S(insn), 0, regs))

#ifdef CONFIG_RISCV_M_MODE
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	u8 val;

	asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
	*r_val = val;

	return 0;
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));

	return 0;
}

static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
{
	register ulong __mepc asm ("a2") = mepc;
	ulong val, rvc_mask = 3, tmp;
@@ -226,8 +311,86 @@ static inline ulong get_insn(ulong mepc)
	: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
	  [xlen_minus_16] "i" (XLEN_MINUS_16));

	return val;
	*r_insn = val;

	return 0;
}
#else
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
	if (user_mode(regs)) {
		return __get_user(*r_val, addr);
	} else {
		*r_val = *addr;
		return 0;
	}
}

static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
	if (user_mode(regs)) {
		return __put_user(val, addr);
	} else {
		*addr = val;
		return 0;
	}
}

#define __read_insn(regs, insn, insn_addr)		\
({							\
	int __ret;					\
							\
	if (user_mode(regs)) {				\
		__ret = __get_user(insn, insn_addr);	\
	} else {					\
		insn = *insn_addr;			\
		__ret = 0;				\
	}						\
							\
	__ret;						\
})

static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
{
	ulong insn = 0;

	if (epc & 0x2) {
		ulong tmp = 0;
		u16 __user *insn_addr = (u16 __user *)epc;

		if (__read_insn(regs, insn, insn_addr))
			return -EFAULT;
		/* __get_user() uses regular "lw" which sign extends the loaded
		 * value; make sure to clear higher order bits in case we "or" it
		 * below with the upper 16 bits half.
		 */
		insn &= GENMASK(15, 0);
		if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn_addr++;
		if (__read_insn(regs, tmp, insn_addr))
			return -EFAULT;
		*r_insn = (tmp << 16) | insn;

		return 0;
	} else {
		u32 __user *insn_addr = (u32 __user *)epc;

		if (__read_insn(regs, insn, insn_addr))
			return -EFAULT;
		if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
			*r_insn = insn;
			return 0;
		}
		insn &= GENMASK(15, 0);
		*r_insn = insn;

		return 0;
	}
}
#endif

union reg_data {
	u8 data_bytes[8];
@@ -235,14 +398,32 @@ union reg_data {
	u64 data_u64;
};

static bool unaligned_ctl __read_mostly;

/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */

int handle_misaligned_load(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn = get_insn(epc);
	unsigned long addr = csr_read(mtval);
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, fp = 0, shift = 0, len = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	*this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

	if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -305,13 +486,21 @@ int handle_misaligned_load(struct pt_regs *regs)
		return -1;
	}

	val.data_u64 = 0;
	for (i = 0; i < len; i++)
		val.data_bytes[i] = load_u8((void *)(addr + i));
	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	if (fp)
		return -1;
	SET_RD(insn, regs, val.data_ulong << shift >> shift);
	val.data_u64 = 0;
	for (i = 0; i < len; i++) {
		if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
			return -1;
	}

	if (!fp)
		SET_RD(insn, regs, val.data_ulong << shift >> shift);
	else if (len == 8)
		set_f64_rd(insn, regs, val.data_u64);
	else
		set_f32_rd(insn, regs, val.data_ulong);

	regs->epc = epc + INSN_LEN(insn);

@@ -322,9 +511,20 @@ int handle_misaligned_store(struct pt_regs *regs)
{
	union reg_data val;
	unsigned long epc = regs->epc;
	unsigned long insn = get_insn(epc);
	unsigned long addr = csr_read(mtval);
	int i, len = 0;
	unsigned long insn;
	unsigned long addr = regs->badaddr;
	int i, len = 0, fp = 0;

	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);

	if (!unaligned_enabled)
		return -1;

	if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
		return -1;

	if (get_insn(regs, epc, &insn))
		return -1;

	regs->epc = 0;

@@ -336,6 +536,14 @@ int handle_misaligned_store(struct pt_regs *regs)
	} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
		len = 8;
#endif
	} else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2(insn, regs);
	} else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2(insn, regs);
	} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
		len = 2;
#if defined(CONFIG_64BIT)
@@ -354,15 +562,88 @@ int handle_misaligned_store(struct pt_regs *regs)
		   ((insn >> SH_RD) & 0x1f)) {
		len = 4;
		val.data_ulong = GET_RS2C(insn, regs);
	} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
		fp = 1;
		len = 8;
		val.data_u64 = GET_F64_RS2C(insn, regs);
#if !defined(CONFIG_64BIT)
	} else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2S(insn, regs);
	} else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
		fp = 1;
		len = 4;
		val.data_ulong = GET_F32_RS2C(insn, regs);
#endif
	} else {
		regs->epc = epc;
		return -1;
	}

	for (i = 0; i < len; i++)
		store_u8((void *)(addr + i), val.data_bytes[i]);
	if (!IS_ENABLED(CONFIG_FPU) && fp)
		return -EOPNOTSUPP;

	for (i = 0; i < len; i++) {
		if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
			return -1;
	}

	regs->epc = epc + INSN_LEN(insn);

	return 0;
}

bool check_unaligned_access_emulated(int cpu)
{
	long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
	unsigned long tmp_var, tmp_val;
	bool misaligned_emu_detected;

	*mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;

	__asm__ __volatile__ (
		" "REG_L" %[tmp], 1(%[ptr])\n"
		: [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");

	misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
	/*
	 * If unaligned_ctl is already set, this means that we detected that all
	 * CPUs use emulated misaligned accesses at boot time. If that changed
	 * when hotplugging the new cpu, this is something we don't handle.
	 */
	if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
		pr_crit("CPU misaligned accesses non homogeneous (expected all emulated)\n");
		while (true)
			cpu_relax();
	}

	return misaligned_emu_detected;
}

void __init unaligned_emulation_finish(void)
{
	int cpu;

	/*
	 * We can only support PR_UNALIGN controls if all CPUs have misaligned
	 * accesses emulated since tasks requesting such control can run on any
	 * CPU.
	 */
	for_each_present_cpu(cpu) {
		if (per_cpu(misaligned_access_speed, cpu) !=
					RISCV_HWPROBE_MISALIGNED_EMULATED) {
			return;
		}
	}
	unaligned_ctl = true;
}

bool unaligned_ctl_available(void)
{
	return unaligned_ctl;
}
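
The final hunk above is also where the hwprobe reporting mentioned in the series comes from: handle_misaligned_load() marks the CPU's misaligned_access_speed as RISCV_HWPROBE_MISALIGNED_EMULATED, and check_unaligned_access_emulated() provokes one misaligned load per CPU to see whether that mark appears. A rough sketch of how userspace might read that report back through the riscv_hwprobe syscall follows; it is an illustration for this page rather than code from the commit, and it assumes kernel headers that provide asm/hwprobe.h and __NR_riscv_hwprobe.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* keys and values */

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

	/* An empty cpu set asks about behaviour common to all online harts. */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) != 0) {
		perror("riscv_hwprobe");
		return 1;
	}

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_EMULATED)
		printf("misaligned accesses are emulated by the kernel\n");
	else
		printf("misaligned accesses are not reported as emulated\n");
	return 0;
}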