mirror of https://mirrors.bfsu.edu.cn/git/linux.git (synced 2024-12-14 06:24:53 +08:00)
arm64: uaccess: remove set_fs()
Now that the uaccess primitives don't take addr_limit into account, we have no need to manipulate this via set_fs() and get_fs(). Remove support for these, along with some infrastructure this renders redundant.

We no longer need to flip UAO to access kernel memory under KERNEL_DS, and head.S unconditionally clears UAO for all kernel configurations via an ERET in init_kernel_el. Thus, we don't need to dynamically flip UAO, nor do we need to context-switch it. However, we still need to adjust PAN during SDEI entry.

Masking of __user pointers no longer needs to use the dynamic value of addr_limit, and can use a constant derived from the maximum possible userspace task size. A new TASK_SIZE_MAX constant is introduced for this, which is also used by core code. In configurations supporting 52-bit VAs, this may include a region of unusable VA space above a 48-bit TTBR0 limit, but never includes any portion of TTBR1.

Note that TASK_SIZE_MAX is an exclusive limit, while USER_DS and KERNEL_DS were inclusive limits, and is converted to a mask by subtracting one.

As the SDEI entry code repurposes the otherwise unnecessary pt_regs::orig_addr_limit field to store the TTBR1 of the interrupted context, for now we rename that to pt_regs::sdei_ttbr1. In future we can consider factoring that out.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: James Morse <james.morse@arm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20201202131558.39270-10-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
This commit is contained in:
parent 7b90dc40e3
commit 3d2403fd10
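The commit message's point about exclusive versus inclusive limits can be made concrete with a small, standalone sketch (ordinary userspace C, not kernel code; VA_BITS = 48 below is an assumed example configuration, not something taken from this patch):

/*
 * Illustration only: TASK_SIZE_MAX is an exclusive limit (one past the last
 * valid user address), so the inclusive bound used for pointer checks is
 * TASK_SIZE_MAX - 1.
 */
#include <stdio.h>

#define VA_BITS		48
#define TASK_SIZE_MAX	(1UL << VA_BITS)	/* exclusive, as in the patch */

int main(void)
{
	unsigned long limit = TASK_SIZE_MAX - 1;	/* inclusive bound / mask */

	printf("TASK_SIZE_MAX = %#018lx (exclusive)\n", TASK_SIZE_MAX);
	printf("limit/mask    = %#018lx (inclusive)\n", limit);
	return 0;
}

By contrast, the removed USER_DS was itself an inclusive bound, ((UL(1) << VA_BITS) - 1), which is why no subtraction was needed before.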
@@ -195,7 +195,6 @@ config ARM64
 	select PCI_SYSCALL if PCI
 	select POWER_RESET
 	select POWER_SUPPLY
-	select SET_FS
 	select SPARSE_IRQ
 	select SWIOTLB
 	select SYSCTL_EXCEPTION_TRACE
@@ -10,6 +10,5 @@
 #include <linux/sched.h>
 
 extern unsigned long arch_align_stack(unsigned long sp);
-void uao_thread_switch(struct task_struct *next);
 
 #endif	/* __ASM_EXEC_H */
@@ -8,9 +8,6 @@
 #ifndef __ASM_PROCESSOR_H
 #define __ASM_PROCESSOR_H
 
-#define KERNEL_DS	UL(-1)
-#define USER_DS		((UL(1) << VA_BITS) - 1)
-
 /*
  * On arm64 systems, unaligned accesses by the CPU are cheap, and so there is
  * no point in shifting all network buffers by 2 bytes just to make some IP
@@ -48,6 +45,7 @@
 
 #define DEFAULT_MAP_WINDOW_64	(UL(1) << VA_BITS_MIN)
 #define TASK_SIZE_64		(UL(1) << vabits_actual)
+#define TASK_SIZE_MAX		(UL(1) << VA_BITS)
 
 #ifdef CONFIG_COMPAT
 #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS)
@@ -193,8 +193,7 @@ struct pt_regs {
 	s32 syscallno;
 	u32 unused2;
 #endif
-
-	u64 orig_addr_limit;
+	u64 sdei_ttbr1;
 	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
 	u64 pmr_save;
 	u64 stackframe[2];
@@ -18,14 +18,11 @@ struct task_struct;
 #include <asm/stack_pointer.h>
 #include <asm/types.h>
 
-typedef unsigned long mm_segment_t;
-
 /*
  * low level task data that entry.S needs immediate access to.
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
-	mm_segment_t		addr_limit;	/* address limit */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
 #endif
@@ -119,7 +116,6 @@ void arch_release_task_struct(struct task_struct *tsk);
 {							\
 	.flags		= _TIF_FOREIGN_FPSTATE,		\
 	.preempt_count	= INIT_PREEMPT_COUNT,		\
-	.addr_limit	= KERNEL_DS,			\
 	INIT_SCS					\
 }
 
@@ -26,44 +26,16 @@
 
 #define HAVE_GET_KERNEL_NOFAULT
 
-#define get_fs()	(current_thread_info()->addr_limit)
-
-static inline void set_fs(mm_segment_t fs)
-{
-	current_thread_info()->addr_limit = fs;
-
-	/*
-	 * Prevent a mispredicted conditional call to set_fs from forwarding
-	 * the wrong address limit to access_ok under speculation.
-	 */
-	spec_bar();
-
-	/* On user-mode return, check fs is correct */
-	set_thread_flag(TIF_FSCHECK);
-
-	/*
-	 * Enable/disable UAO so that copy_to_user() etc can access
-	 * kernel memory with the unprivileged instructions.
-	 */
-	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
-		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
-	else
-		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
-				CONFIG_ARM64_UAO));
-}
-
-#define uaccess_kernel()	(get_fs() == KERNEL_DS)
-
 /*
  * Test whether a block of memory is a valid user space address.
  * Returns 1 if the range is valid, 0 otherwise.
  *
  * This is equivalent to the following test:
- * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
+ * (u65)addr + (u65)size <= (u65)TASK_SIZE_MAX
  */
 static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
 {
-	unsigned long ret, limit = current_thread_info()->addr_limit;
+	unsigned long ret, limit = TASK_SIZE_MAX - 1;
 
 	/*
 	 * Asynchronous I/O running in a kernel thread does not have the
@@ -96,7 +68,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
 }
 
 #define access_ok(addr, size)	__range_ok(addr, size)
-#define user_addr_max		get_fs
 
 #define _ASM_EXTABLE(from, to)					\
 	"	.pushsection	__ex_table, \"a\"\n"		\
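For reference, here is a standalone C model of the test documented in the __range_ok() comment above, "(u65)addr + (u65)size <= (u65)TASK_SIZE_MAX". This is an illustration only, not the kernel's implementation; the wider addition simply shows why the check cannot be fooled by a 64-bit wrap-around. VA_BITS = 48 is an assumed example configuration.

/*
 * Standalone model (GCC/Clang userspace C, not kernel code) of the range
 * check: doing the sum in a 128-bit type means addr + size cannot wrap
 * around 2^64.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VA_BITS		48
#define TASK_SIZE_MAX	(1UL << VA_BITS)

static bool range_ok_model(uint64_t addr, uint64_t size)
{
	return (unsigned __int128)addr + size <= TASK_SIZE_MAX;
}

int main(void)
{
	/* In range: the block fits entirely below TASK_SIZE_MAX. */
	printf("%d\n", range_ok_model(0x1000, 0x1000));		/* prints 1 */
	/* Out of range: a plain 64-bit add would wrap to a small value. */
	printf("%d\n", range_ok_model(UINT64_MAX, 0x1000));	/* prints 0 */
	return 0;
}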
@@ -226,9 +197,9 @@ static inline void uaccess_enable_not_uao(void)
 }
 
 /*
- * Sanitise a uaccess pointer such that it becomes NULL if above the
- * current addr_limit. In case the pointer is tagged (has the top byte set),
- * untag the pointer before checking.
+ * Sanitise a uaccess pointer such that it becomes NULL if above the maximum
+ * user address. In case the pointer is tagged (has the top byte set), untag
+ * the pointer before checking.
  */
 #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
 static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -239,7 +210,7 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
 	"	bics	xzr, %3, %2\n"
 	"	csel	%0, %1, xzr, eq\n"
 	: "=&r" (safe_ptr)
-	: "r" (ptr), "r" (current_thread_info()->addr_limit),
+	: "r" (ptr), "r" (TASK_SIZE_MAX - 1),
 	  "r" (untagged_addr(ptr))
 	: "cc");
 
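The bics/csel change above now masks against the constant TASK_SIZE_MAX - 1 rather than the per-thread addr_limit. A rough C-level model of what that sequence computes is sketched below (illustrative only; untag() is a stand-in for the kernel's untagged_addr(), VA_BITS = 48 is an assumed example configuration, and the real code is branchless).

/*
 * Model (userspace C, not kernel code) of __uaccess_mask_ptr(): the pointer
 * survives if its untagged value lies below TASK_SIZE_MAX, and becomes NULL
 * otherwise.
 */
#include <stddef.h>
#include <stdint.h>

#define VA_BITS		48
#define TASK_SIZE_MAX	(1UL << VA_BITS)

/* Stand-in for untagged_addr(): sign-extend from bit 55, clearing a TBI tag. */
static uint64_t untag(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

static const void *mask_ptr_model(const void *ptr)
{
	uint64_t limit = TASK_SIZE_MAX - 1;
	uint64_t addr = (uint64_t)(uintptr_t)ptr;

	/* Like "bics xzr, untagged, limit": any bit set above the limit? */
	return (untag(addr) & ~limit) ? NULL : ptr;
}

A kernel pointer, whose untagged value sign-extends to all-ones in the upper bits, trips the & ~limit test and is replaced by NULL, which is the property the csel provides without a branch.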
@@ -30,7 +30,6 @@ int main(void)
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
-  DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
   DEFINE(TSK_TI_TTBR0,		offsetof(struct task_struct, thread_info.ttbr0));
 #endif
@@ -70,7 +69,7 @@ int main(void)
   DEFINE(S_PSTATE,		offsetof(struct pt_regs, pstate));
   DEFINE(S_PC,			offsetof(struct pt_regs, pc));
   DEFINE(S_SYSCALLNO,		offsetof(struct pt_regs, syscallno));
-  DEFINE(S_ORIG_ADDR_LIMIT,	offsetof(struct pt_regs, orig_addr_limit));
+  DEFINE(S_SDEI_TTBR1,		offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,		offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
   DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
@@ -1777,10 +1777,6 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sys_reg = SYS_ID_AA64MMFR2_EL1,
 		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
 		.min_field_value = 1,
-		/*
-		 * We rely on stop_machine() calling uao_thread_switch() to set
-		 * UAO immediately after patching.
-		 */
 	},
 #endif /* CONFIG_ARM64_UAO */
 #ifdef CONFIG_ARM64_PAN
@@ -216,12 +216,6 @@ alternative_else_nop_endif
 	.else
 	add	x21, sp, #S_FRAME_SIZE
 	get_current_task tsk
-	/* Save the task's original addr_limit and set USER_DS */
-	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
-	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	mov	x20, #USER_DS
-	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
-	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1
 	mrs	x23, spsr_el1
@@ -279,12 +273,6 @@ alternative_else_nop_endif
 	.macro	kernel_exit, el
 	.if	\el != 0
 	disable_daif
-
-	/* Restore the task's original addr_limit. */
-	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
-	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
-
-	/* No need to restore UAO, it will be restored from SPSR_EL1 */
 	.endif
 
 	/* Restore pmr */
@@ -999,10 +987,9 @@ SYM_CODE_START(__sdei_asm_entry_trampoline)
 	mov	x4, xzr
 
 	/*
-	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
-	 * the kernel on exit.
+	 * Remember whether to unmap the kernel on exit.
 	 */
-1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 
 #ifdef CONFIG_RANDOMIZE_BASE
 	adr	x4, tramp_vectors + PAGE_SIZE
@@ -1023,7 +1010,7 @@ NOKPROBE(__sdei_asm_entry_trampoline)
  * x4: struct sdei_registered_event argument from registration time.
  */
SYM_CODE_START(__sdei_asm_exit_trampoline)
-	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
+	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
 	cbnz	x4, 1f
 
 	tramp_unmap_kernel	tmp=x4
@@ -460,17 +460,6 @@ static void tls_thread_switch(struct task_struct *next)
 	write_sysreg(*task_user_tls(next), tpidr_el0);
 }
 
-/* Restore the UAO state depending on next's addr_limit */
-void uao_thread_switch(struct task_struct *next)
-{
-	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
-		if (task_thread_info(next)->addr_limit == KERNEL_DS)
-			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
-		else
-			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
-	}
-}
-
 /*
  * Force SSBS state on context-switch, since it may be lost after migrating
  * from a CPU which treats the bit as RES0 in a heterogeneous system.
@@ -554,7 +543,6 @@ __notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
 	hw_breakpoint_thread_switch(next);
 	contextidr_thread_switch(next);
 	entry_task_switch(next);
-	uao_thread_switch(next);
 	ssbs_thread_switch(next);
 	erratum_1418040_thread_switch(prev, next);
 
@@ -242,15 +242,12 @@ asmlinkage __kprobes notrace unsigned long
 __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 {
 	unsigned long ret;
-	mm_segment_t orig_addr_limit;
 
 	/*
 	 * We didn't take an exception to get here, so the HW hasn't
-	 * set/cleared bits in PSTATE that we may rely on. Initialize PAN, then
-	 * use force_uaccess_begin() to reset addr_limit.
+	 * set/cleared bits in PSTATE that we may rely on. Initialize PAN.
 	 */
 	__sdei_pstate_entry();
-	orig_addr_limit = force_uaccess_begin();
 
 	nmi_enter();
 
@@ -258,7 +255,5 @@ __sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
 
 	nmi_exit();
 
-	force_uaccess_end(orig_addr_limit);
-
 	return ret;
 }
@@ -58,7 +58,6 @@ void notrace __cpu_suspend_exit(void)
 	 * features that might not have been set correctly.
 	 */
 	__uaccess_enable_hw_pan();
-	uao_thread_switch(current);
 
 	/*
 	 * Restore HW breakpoint registers to sane values
@@ -479,11 +479,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	}
 
 	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
-		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
-		if (regs->orig_addr_limit == KERNEL_DS)
-			die_kernel_fault("access to user memory with fs=KERNEL_DS",
-					 addr, esr, regs);
-
 		if (is_el1_instruction_abort(esr))
 			die_kernel_fault("execution of user memory",
 					 addr, esr, regs);