From 6690e86be83ac75832e461c141055b5d601c0a6d Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 14 Feb 2019 10:30:52 +0100
Subject: [PATCH 01/27] sched/x86: Save [ER]FLAGS on context switch

Effectively reverts commit:

  2c7577a75837 ("sched/x86_64: Don't save flags on context switch")

Specifically because SMAP uses FLAGS.AC which invalidates the claim
that the kernel has clean flags.

In particular; while preemption from interrupt return is fine (the
IRET frame on the exception stack contains FLAGS) it breaks any code
that does synchronous scheduling, including preempt_enable().

This has become a significant issue ever since commit:

  5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")

provided for means of having 'normal' C code between STAC / CLAC,
exposing the FLAGS.AC state. So far this hasn't led to trouble,
however, fix it before it comes apart.

Reported-by: Julien Thierry
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: stable@kernel.org
Fixes: 5b24a7a2aa20 ("Add 'unsafe' user access functions for batched accesses")
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_32.S        | 2 ++
 arch/x86/entry/entry_64.S        | 2 ++
 arch/x86/include/asm/switch_to.h | 1 +
 arch/x86/kernel/process_32.c     | 7 +++++++
 arch/x86/kernel/process_64.c     | 8 ++++++++
 5 files changed, 20 insertions(+)

diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index d309f30cf7af..5fc76b755510 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -650,6 +650,7 @@ ENTRY(__switch_to_asm)
     pushl   %ebx
     pushl   %edi
     pushl   %esi
+    pushfl

     /* switch stack */
     movl    %esp, TASK_threadsp(%eax)
@@ -672,6 +673,7 @@ ENTRY(__switch_to_asm)
 #endif

     /* restore callee-saved registers */
+    popfl
     popl    %esi
     popl    %edi
     popl    %ebx
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1f0efdb7b629..4fe27b67d7e2 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -291,6 +291,7 @@ ENTRY(__switch_to_asm)
     pushq   %r13
     pushq   %r14
     pushq   %r15
+    pushfq

     /* switch stack */
     movq    %rsp, TASK_threadsp(%rdi)
@@ -313,6 +314,7 @@ ENTRY(__switch_to_asm)
 #endif

     /* restore callee-saved registers */
+    popfq
     popq    %r15
     popq    %r14
     popq    %r13
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 7cf1a270d891..157149d4129c 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -40,6 +40,7 @@ asmlinkage void ret_from_fork(void);
  * order of the fields must match the code in __switch_to_asm().
  */
 struct inactive_task_frame {
+    unsigned long flags;
 #ifdef CONFIG_X86_64
     unsigned long r15;
     unsigned long r14;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index e471d8e6f0b2..70933193878c 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -127,6 +127,13 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
     struct task_struct *tsk;
     int err;

+    /*
+     * For a new task use the RESET flags value since there is no before.
+     * All the status flags are zero; DF and all the system flags must also
+     * be 0, specifically IF must be 0 because we context switch to the new
+     * task with interrupts disabled.
+ */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 6a62f4af9fcf..026a43be9bd1 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -392,6 +392,14 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp, childregs = task_pt_regs(p); fork_frame = container_of(childregs, struct fork_frame, regs); frame = &fork_frame->frame; + + /* + * For a new task use the RESET flags value since there is no before. + * All the status flags are zero; DF and all the system flags must also + * be 0, specifically IF must be 0 because we context switch to the new + * task with interrupts disabled. + */ + frame->flags = X86_EFLAGS_FIXED; frame->bp = 0; frame->ret_addr = (unsigned long) ret_from_fork; p->thread.sp = (unsigned long) fork_frame; From 37686b1353cfc30e127cef811959cdbcd0495d98 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Thu, 7 Mar 2019 11:48:02 -0600 Subject: [PATCH 02/27] tracing: Improve "if" macro code generation With CONFIG_PROFILE_ALL_BRANCHES=y, the "if" macro converts the conditional to an array index. This can cause GCC to create horrible code. When there are nested ifs, the generated code uses register values to encode branching decisions. Make it easier for GCC to optimize by keeping the conditional as a conditional rather than converting it to an integer. This shrinks the generated code quite a bit, and also makes the code sane enough for objtool to understand. Reported-by: Peter Zijlstra Signed-off-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Linus Torvalds Cc: Thomas Gleixner Cc: brgerst@gmail.com Cc: catalin.marinas@arm.com Cc: dvlasenk@redhat.com Cc: dvyukov@google.com Cc: hpa@zytor.com Cc: james.morse@arm.com Cc: julien.thierry@arm.com Cc: luto@amacapital.net Cc: luto@kernel.org Cc: rostedt@goodmis.org Cc: valentin.schneider@arm.com Cc: will.deacon@arm.com Link: https://lkml.kernel.org/r/20190307174802.46fmpysxyo35hh43@treble Signed-off-by: Ingo Molnar --- include/linux/compiler.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 445348facea9..d58aa0db05f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -67,7 +67,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, .line = __LINE__, \ }; \ ______r = !!(cond); \ - ______f.miss_hit[______r]++; \ + ______r ? ______f.miss_hit[1]++ : ______f.miss_hit[0]++;\ ______r; \ })) #endif /* CONFIG_PROFILE_ALL_BRANCHES */ From 67a0514afdbb8b2fc70b771b8c77661a9cb9d3a9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Feb 2019 12:56:35 +0100 Subject: [PATCH 03/27] x86/ia32: Fix ia32_restore_sigcontext() AC leak Objtool spotted that we call native_load_gs_index() with AC set. Re-arrange the code to avoid that. 
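In outline (a condensed sketch of the change below, using the GET_SEG()/
RELOAD_SEG() helpers from ia32_signal.c):

  /* Before: segment reloads inside the uaccess region; the segment
     load underneath RELOAD_SEG() thus runs with AC=1: */
  get_user_try {
          RELOAD_SEG(gs);                 /* reads sc->gs, then loads GS */
          ...
  } get_user_catch(err);

  /* After: only the user-space fetches stay inside the region: */
  get_user_try {
          gs = GET_SEG(gs);               /* AC=1 just for the fetch */
          ...
  } get_user_catch(err);
  RELOAD_SEG(gs);                         /* segment load runs with AC=0 */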
Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/ia32/ia32_signal.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c index 321fe5f5d0e9..4d5fcd47ab75 100644 --- a/arch/x86/ia32/ia32_signal.c +++ b/arch/x86/ia32/ia32_signal.c @@ -61,9 +61,8 @@ } while (0) #define RELOAD_SEG(seg) { \ - unsigned int pre = GET_SEG(seg); \ + unsigned int pre = (seg) | 3; \ unsigned int cur = get_user_seg(seg); \ - pre |= 3; \ if (pre != cur) \ set_user_seg(seg, pre); \ } @@ -72,6 +71,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, struct sigcontext_32 __user *sc) { unsigned int tmpflags, err = 0; + u16 gs, fs, es, ds; void __user *buf; u32 tmp; @@ -79,16 +79,10 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, current->restart_block.fn = do_no_restart_syscall; get_user_try { - /* - * Reload fs and gs if they have changed in the signal - * handler. This does not handle long fs/gs base changes in - * the handler, but does not clobber them at least in the - * normal case. - */ - RELOAD_SEG(gs); - RELOAD_SEG(fs); - RELOAD_SEG(ds); - RELOAD_SEG(es); + gs = GET_SEG(gs); + fs = GET_SEG(fs); + ds = GET_SEG(ds); + es = GET_SEG(es); COPY(di); COPY(si); COPY(bp); COPY(sp); COPY(bx); COPY(dx); COPY(cx); COPY(ip); COPY(ax); @@ -106,6 +100,17 @@ static int ia32_restore_sigcontext(struct pt_regs *regs, buf = compat_ptr(tmp); } get_user_catch(err); + /* + * Reload fs and gs if they have changed in the signal + * handler. This does not handle long fs/gs base changes in + * the handler, but does not clobber them at least in the + * normal case. + */ + RELOAD_SEG(gs); + RELOAD_SEG(fs); + RELOAD_SEG(ds); + RELOAD_SEG(es); + err |= fpu__restore_sig(buf, 1); force_iret(); From 8f4faed01e3015955801c8ef066ec7fd7a8b3902 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Feb 2019 13:52:31 +0100 Subject: [PATCH 04/27] i915, uaccess: Fix redundant CLAC New tooling noticed this: drivers/gpu/drm/i915/i915_gem_execbuffer.o: warning: objtool: .altinstr_replacement+0x3c: redundant UACCESS disable drivers/gpu/drm/i915/i915_gem_execbuffer.o: warning: objtool: .altinstr_replacement+0x66: redundant UACCESS disable You don't need user_access_end() if user_access_begin() fails. Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Chris Wilson Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 02adcaf6ebea..16f80a448820 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1667,6 +1667,7 @@ static int eb_copy_relocations(const struct i915_execbuffer *eb) len)) { end_user: user_access_end(); +end: kvfree(relocs); err = -EFAULT; goto err; @@ -1686,7 +1687,7 @@ end_user: * relocations were valid. */ if (!user_access_begin(urelocs, size)) - goto end_user; + goto end; for (copied = 0; copied < nreloc; copied++) unsafe_put_user(-1, @@ -2695,7 +2696,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, * when we did the "copy_from_user()" above. 
*/ if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list))) - goto end_user; + goto end; for (i = 0; i < args->buffer_count; i++) { if (!(exec2_list[i].offset & UPDATE)) @@ -2709,6 +2710,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, } end_user: user_access_end(); +end:; } args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS; From 3693ca81151eacd498675baae56abede577e8b31 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 Mar 2019 15:24:33 +0100 Subject: [PATCH 05/27] x86/uaccess: Move copy_user_handle_tail() into asm By writing the function in asm we avoid cross object code flow and objtool no longer gets confused about a 'stray' CLAC. Also; the asm version is actually _simpler_. Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/asm.h | 24 ---------------- arch/x86/include/asm/uaccess_64.h | 3 -- arch/x86/lib/copy_user_64.S | 48 +++++++++++++++++++++++++++++++ arch/x86/lib/usercopy_64.c | 20 ------------- 4 files changed, 48 insertions(+), 47 deletions(-) diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 6467757bb39f..3ff577c0b102 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -148,30 +148,6 @@ _ASM_PTR (entry); \ .popsection -.macro ALIGN_DESTINATION - /* check for bad alignment of destination */ - movl %edi,%ecx - andl $7,%ecx - jz 102f /* already aligned */ - subl $8,%ecx - negl %ecx - subl %ecx,%edx -100: movb (%rsi),%al -101: movb %al,(%rdi) - incq %rsi - incq %rdi - decl %ecx - jnz 100b -102: - .section .fixup,"ax" -103: addl %ecx,%edx /* ecx is zerorest also */ - jmp copy_user_handle_tail - .previous - - _ASM_EXTABLE_UA(100b, 103b) - _ASM_EXTABLE_UA(101b, 103b) - .endm - #else # define _EXPAND_EXTABLE_HANDLE(x) #x # define _ASM_EXTABLE_HANDLE(from, to, handler) \ diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index a9d637bc301d..5cd1caa8bc65 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -207,9 +207,6 @@ __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size) return __copy_user_flushcache(dst, src, size); } -unsigned long -copy_user_handle_tail(char *to, char *from, unsigned len); - unsigned long mcsafe_handle_tail(char *to, char *from, unsigned len); diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S index db4e5aa0858b..b2f1822084ae 100644 --- a/arch/x86/lib/copy_user_64.S +++ b/arch/x86/lib/copy_user_64.S @@ -16,6 +16,30 @@ #include #include +.macro ALIGN_DESTINATION + /* check for bad alignment of destination */ + movl %edi,%ecx + andl $7,%ecx + jz 102f /* already aligned */ + subl $8,%ecx + negl %ecx + subl %ecx,%edx +100: movb (%rsi),%al +101: movb %al,(%rdi) + incq %rsi + incq %rdi + decl %ecx + jnz 100b +102: + .section .fixup,"ax" +103: addl %ecx,%edx /* ecx is zerorest also */ + jmp copy_user_handle_tail + .previous + + _ASM_EXTABLE_UA(100b, 103b) + _ASM_EXTABLE_UA(101b, 103b) + .endm + /* * copy_user_generic_unrolled - memory copy with exception handling. * This version is for CPUs like P4 that don't have efficient micro @@ -193,6 +217,30 @@ ENTRY(copy_user_enhanced_fast_string) ENDPROC(copy_user_enhanced_fast_string) EXPORT_SYMBOL(copy_user_enhanced_fast_string) +/* + * Try to copy last bytes and clear the rest if needed. 
 * Since protection fault in copy_from/to_user is not a normal situation,
 * it is not necessary to optimize tail handling.
 *
 * Input:
 * rdi destination
 * rsi source
 * rdx count
 *
 * Output:
 * eax uncopied bytes or 0 if successful.
 */
ALIGN;
copy_user_handle_tail:
    movl %edx,%ecx
1:  rep movsb
2:  mov %ecx,%eax
    ASM_CLAC
    ret

    _ASM_EXTABLE_UA(1b, 2b)
ENDPROC(copy_user_handle_tail)

/*
 * copy_user_nocache - Uncached memory copy with exception handling
 * This will force destination out of cache for more performance.
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index ee42bb0cbeb3..9952a01cad24 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -54,26 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
 }
 EXPORT_SYMBOL(clear_user);

-/*
- * Try to copy last bytes and clear the rest if needed.
- * Since protection fault in copy_from/to_user is not a normal situation,
- * it is not necessary to optimize tail handling.
- */
-__visible unsigned long
-copy_user_handle_tail(char *to, char *from, unsigned len)
-{
-    for (; len; --len, to++) {
-        char c;
-
-        if (__get_user_nocheck(c, from++, sizeof(char)))
-            break;
-        if (__put_user_nocheck(c, to, sizeof(char)))
-            break;
-    }
-    clac();
-    return len;
-}
-
 /*
  * Similar to copy_user_handle_tail, probe for the write fault point,
  * but reuse __memcpy_mcsafe in case a new read error is encountered.

From b69656fa7ea2f75e47d7bd5b9430359fa46488af Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 3 Apr 2019 09:39:45 +0200
Subject: [PATCH 06/27] x86/uaccess: Fix up the fixup

New tooling got confused about this:

  arch/x86/lib/memcpy_64.o: warning: objtool: .fixup+0x7: return with UACCESS enabled

While the code isn't wrong, it is tedious (if at all possible) to
figure out what function a particular chunk of .fixup belongs to.
This then confuses the objtool uaccess validation. Instead of
returning directly from the .fixup, jump back into the right function.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/lib/memcpy_64.S | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 3b24dc05251c..9d05572370ed 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -257,6 +257,7 @@ ENTRY(__memcpy_mcsafe)
     /* Copy successful. Return zero */
 .L_done_memcpy_trap:
     xorl %eax, %eax
+.L_done:
     ret
 ENDPROC(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
@@ -273,7 +274,7 @@ EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
     addl    %edx, %ecx
 .E_trailing_bytes:
     mov     %ecx, %eax
-    ret
+    jmp     .L_done

 /*
  * For write fault handling, given the destination is unaligned,

From ff05ab2305aaeb21a3002ae95a17e176c198b71b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 18 Mar 2019 14:33:07 +0100
Subject: [PATCH 07/27] x86/nospec, objtool: Introduce ANNOTATE_IGNORE_ALTERNATIVE

To facilitate other usage of ignoring alternatives; rename
ANNOTATE_NOSPEC_IGNORE to ANNOTATE_IGNORE_ALTERNATIVE.
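For reference, the intended usage in inline asm (a sketch; patch 08 in
this series adds such a user in the Xen hypercall wrappers):

  asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
               ASM_STAC ::: "memory", "flags");

The annotation records the location of the instruction that follows it
in the .discard.ignore_alts section, which objtool consumes and the
linker then discards.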
Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/alternative-asm.h | 11 ++++++++++ arch/x86/include/asm/alternative.h | 10 +++++++++ arch/x86/include/asm/nospec-branch.h | 28 +++++++++----------------- tools/objtool/check.c | 8 ++++---- 4 files changed, 34 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h index 31b627b43a8e..464034db299f 100644 --- a/arch/x86/include/asm/alternative-asm.h +++ b/arch/x86/include/asm/alternative-asm.h @@ -19,6 +19,17 @@ .endm #endif +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +.macro ANNOTATE_IGNORE_ALTERNATIVE + .Lannotate_\@: + .pushsection .discard.ignore_alts + .long .Lannotate_\@ - . + .popsection +.endm + /* * Issue one struct alt_instr descriptor entry (need to put it into * the section .altinstructions, see below). This entry contains diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h index 4c74073a19cc..094fbc9c0b1c 100644 --- a/arch/x86/include/asm/alternative.h +++ b/arch/x86/include/asm/alternative.h @@ -45,6 +45,16 @@ #define LOCK_PREFIX "" #endif +/* + * objtool annotation to ignore the alternatives and only consider the original + * instruction(s). + */ +#define ANNOTATE_IGNORE_ALTERNATIVE \ + "999:\n\t" \ + ".pushsection .discard.ignore_alts\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + struct alt_instr { s32 instr_offset; /* original instruction */ s32 repl_offset; /* offset to replacement instruction */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index dad12b767ba0..daf25b60c9e3 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -10,6 +10,15 @@ #include #include +/* + * This should be used immediately before a retpoline alternative. It tells + * objtool where the retpolines are so that it can make sense of the control + * flow by just reading the original instruction(s) and ignoring the + * alternatives. + */ +#define ANNOTATE_NOSPEC_ALTERNATIVE \ + ANNOTATE_IGNORE_ALTERNATIVE + /* * Fill the CPU return stack buffer. * @@ -56,19 +65,6 @@ #ifdef __ASSEMBLY__ -/* - * This should be used immediately before a retpoline alternative. It tells - * objtool where the retpolines are so that it can make sense of the control - * flow by just reading the original instruction(s) and ignoring the - * alternatives. - */ -.macro ANNOTATE_NOSPEC_ALTERNATIVE - .Lannotate_\@: - .pushsection .discard.nospec - .long .Lannotate_\@ - . - .popsection -.endm - /* * This should be used immediately before an indirect jump/call. It tells * objtool the subsequent indirect jump/call is vouched safe for retpoline @@ -152,12 +148,6 @@ #else /* __ASSEMBLY__ */ -#define ANNOTATE_NOSPEC_ALTERNATIVE \ - "999:\n\t" \ - ".pushsection .discard.nospec\n\t" \ - ".long 999b - .\n\t" \ - ".popsection\n\t" - #define ANNOTATE_RETPOLINE_SAFE \ "999:\n\t" \ ".pushsection .discard.retpoline_safe\n\t" \ diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 5dde107083c6..110ea3d84772 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -457,13 +457,13 @@ static void add_ignores(struct objtool_file *file) * But it at least allows objtool to understand the control flow *around* the * retpoline. 
 */
-static int add_nospec_ignores(struct objtool_file *file)
+static int add_ignore_alternatives(struct objtool_file *file)
 {
     struct section *sec;
     struct rela *rela;
     struct instruction *insn;

-    sec = find_section_by_name(file->elf, ".rela.discard.nospec");
+    sec = find_section_by_name(file->elf, ".rela.discard.ignore_alts");
     if (!sec)
         return 0;

@@ -475,7 +475,7 @@ static int add_nospec_ignores(struct objtool_file *file)

         insn = find_insn(file, rela->sym->sec, rela->addend);
         if (!insn) {
-            WARN("bad .discard.nospec entry");
+            WARN("bad .discard.ignore_alts entry");
             return -1;
         }

@@ -1239,7 +1239,7 @@ static int decode_sections(struct objtool_file *file)

     add_ignores(file);

-    ret = add_nospec_ignores(file);
+    ret = add_ignore_alternatives(file);
     if (ret)
         return ret;

From 4fc0f0e9471ec573ab2d44e7f462d996d614536e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 6 Mar 2019 19:09:08 +0100
Subject: [PATCH 08/27] x86/uaccess, xen: Suppress SMAP warnings

  drivers/xen/privcmd.o: warning: objtool: privcmd_ioctl()+0x1414: call to hypercall_page() with UACCESS enabled

Some Xen hypercalls allow parameter buffers in user land, so they need
to set AC=1. Avoid the warning for those cases.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Juergen Gross
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: andrew.cooper3@citrix.com
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/xen/hypercall.h | 24 ++++++++++++++++++++----
 1 file changed, 20 insertions(+), 4 deletions(-)

diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h
index de6f0d59a24f..580a909afad4 100644
--- a/arch/x86/include/asm/xen/hypercall.h
+++ b/arch/x86/include/asm/xen/hypercall.h
@@ -214,6 +214,22 @@ xen_single_call(unsigned int call,
     return (long)__res;
 }

+static __always_inline void __xen_stac(void)
+{
+    /*
+     * Suppress objtool seeing the STAC/CLAC and getting confused about it
+     * calling random code with AC=1.
+     */
+    asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+                 ASM_STAC ::: "memory", "flags");
+}
+
+static __always_inline void __xen_clac(void)
+{
+    asm volatile(ANNOTATE_IGNORE_ALTERNATIVE
+                 ASM_CLAC ::: "memory", "flags");
+}
+
 static inline long
 privcmd_call(unsigned int call,
              unsigned long a1, unsigned long a2,
              unsigned long a3, unsigned long a4,
              unsigned long a5)
@@ -222,9 +238,9 @@ privcmd_call(unsigned int call,
 {
     long res;

-    stac();
+    __xen_stac();
     res = xen_single_call(call, a1, a2, a3, a4, a5);
-    clac();
+    __xen_clac();

     return res;
 }
@@ -421,9 +437,9 @@ HYPERVISOR_dm_op(
     domid_t dom, unsigned int nr_bufs, struct xen_dm_op_buf *bufs)
 {
     int ret;
-    stac();
+    __xen_stac();
     ret = _hypercall3(int, dm_op, dom, nr_bufs, bufs);
-    clac();
+    __xen_clac();
     return ret;
 }

From b7f89bfe52cd523a229d6dd22639225b2e3681dc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 3 Apr 2019 09:39:47 +0200
Subject: [PATCH 09/27] x86/uaccess: Always inline user_access_begin()

If GCC out-of-lines it, the STAC and CLAC are in different functions
and objtool gets upset.
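The pairing objtool checks for only works when both ends are visible in
one function; the canonical pattern is (a minimal sketch):

  if (!user_access_begin(ptr, sizeof(*ptr)))    /* STAC on success */
          return -EFAULT;
  unsafe_put_user(x, ptr, efault);              /* runs with AC=1 */
  user_access_end();                            /* CLAC */
  return 0;
efault:
  user_access_end();
  return -EFAULT;

With user_access_begin() out-of-lined, the STAC would land in a separate
little function while the matching CLAC stays in the caller.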
Signed-off-by: Peter Zijlstra (Intel)
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/uaccess.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 1954dd5552a2..ae5783b5fab0 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -705,7 +705,7 @@ extern struct movsl_mask {
  * checking before using them, but you have to surround them with the
  * user_access_begin/end() pair.
  */
-static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
+static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
 {
     if (unlikely(!access_ok(ptr,len)))
         return 0;

From 88e4718275c1bddca6f61f300688b4553dc8584b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 3 Apr 2019 09:39:48 +0200
Subject: [PATCH 10/27] x86/uaccess, signal: Fix AC=1 bloat

Occasionally GCC is less aggressive with inlining and the following is
observed:

  arch/x86/kernel/signal.o: warning: objtool: restore_sigcontext()+0x3cc: call to force_valid_ss.isra.5() with UACCESS enabled
  arch/x86/kernel/signal.o: warning: objtool: do_signal()+0x384: call to frame_uc_flags.isra.0() with UACCESS enabled

Cure this by moving this code out of the AC=1 region, since it really
isn't needed for the user access.

Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Andy Lutomirski
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/kernel/signal.c | 29 +++++++++++++++++------------
 1 file changed, 17 insertions(+), 12 deletions(-)

diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 08dfd4c1a4f9..c8aa58a2bab9 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -132,16 +132,6 @@ static int restore_sigcontext(struct pt_regs *regs,
         COPY_SEG_CPL3(cs);
         COPY_SEG_CPL3(ss);

-#ifdef CONFIG_X86_64
-        /*
-         * Fix up SS if needed for the benefit of old DOSEMU and
-         * CRIU.
-         */
-        if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) &&
-                     user_64bit_mode(regs)))
-            force_valid_ss(regs);
-#endif
-
         get_user_ex(tmpflags, &sc->flags);
         regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
         regs->orig_ax = -1;        /* disable syscall checks */
@@ -150,6 +140,15 @@ static int restore_sigcontext(struct pt_regs *regs,
         buf = (void __user *)buf_val;
     } get_user_catch(err);

+#ifdef CONFIG_X86_64
+    /*
+     * Fix up SS if needed for the benefit of old DOSEMU and
+     * CRIU.
+     */
+    if (unlikely(!(uc_flags & UC_STRICT_RESTORE_SS) && user_64bit_mode(regs)))
+        force_valid_ss(regs);
+#endif
+
     err |= fpu__restore_sig(buf, IS_ENABLED(CONFIG_X86_32));

     force_iret();
@@ -461,6 +460,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 {
     struct rt_sigframe __user *frame;
     void __user *fp = NULL;
+    unsigned long uc_flags;
     int err = 0;

     frame = get_sigframe(&ksig->ka, regs, sizeof(struct rt_sigframe), &fp);
@@ -473,9 +473,11 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
             return -EFAULT;
     }

+    uc_flags = frame_uc_flags(regs);
+
     put_user_try {
         /* Create the ucontext.
*/ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); save_altstack_ex(&frame->uc.uc_stack, regs->sp); @@ -541,6 +543,7 @@ static int x32_setup_rt_frame(struct ksignal *ksig, { #ifdef CONFIG_X86_X32_ABI struct rt_sigframe_x32 __user *frame; + unsigned long uc_flags; void __user *restorer; int err = 0; void __user *fpstate = NULL; @@ -555,9 +558,11 @@ static int x32_setup_rt_frame(struct ksignal *ksig, return -EFAULT; } + uc_flags = frame_uc_flags(regs); + put_user_try { /* Create the ucontext. */ - put_user_ex(frame_uc_flags(regs), &frame->uc.uc_flags); + put_user_ex(uc_flags, &frame->uc.uc_flags); put_user_ex(0, &frame->uc.uc_link); compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp); put_user_ex(0, &frame->uc.uc__pad0); From e74deb11931ff682b59d5b9d387f7115f689698e Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2019 09:39:48 +0200 Subject: [PATCH 11/27] x86/uaccess: Introduce user_access_{save,restore}() Introduce common helpers for when we need to safely suspend a uaccess section; for instance to generate a {KA,UB}SAN report. Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/smap.h | 20 ++++++++++++++++++++ arch/x86/include/asm/uaccess.h | 3 +++ include/linux/uaccess.h | 2 ++ 3 files changed, 25 insertions(+) diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h index db333300bd4b..6cfe43171020 100644 --- a/arch/x86/include/asm/smap.h +++ b/arch/x86/include/asm/smap.h @@ -58,6 +58,23 @@ static __always_inline void stac(void) alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP); } +static __always_inline unsigned long smap_save(void) +{ + unsigned long flags; + + asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC), + X86_FEATURE_SMAP) + : "=rm" (flags) : : "memory", "cc"); + + return flags; +} + +static __always_inline void smap_restore(unsigned long flags) +{ + asm volatile (ALTERNATIVE("", "push %0; popf", X86_FEATURE_SMAP) + : : "g" (flags) : "memory", "cc"); +} + /* These macros can be used in asm() statements */ #define ASM_CLAC \ ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP) @@ -69,6 +86,9 @@ static __always_inline void stac(void) static inline void clac(void) { } static inline void stac(void) { } +static inline unsigned long smap_save(void) { return 0; } +static inline void smap_restore(unsigned long flags) { } + #define ASM_CLAC #define ASM_STAC diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index ae5783b5fab0..5ca7b91faf67 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -715,6 +715,9 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt #define user_access_begin(a,b) user_access_begin(a,b) #define user_access_end() __uaccess_end() +#define user_access_save() smap_save() +#define user_access_restore(x) smap_restore(x) + #define unsafe_put_user(x, ptr, label) \ __put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 37b226e8df13..2b70130af585 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -268,6 +268,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define user_access_end() do { } while (0) #define unsafe_get_user(x, ptr, err) do { if 
(unlikely(__get_user(x, ptr))) goto err; } while (0)
 #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0)
+static inline unsigned long user_access_save(void) { return 0UL; }
+static inline void user_access_restore(unsigned long flags) { }
 #endif

 #ifdef CONFIG_HARDENED_USERCOPY

From a936af8ea3580855adcc80daa8e01c0196afeb15 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 7 Mar 2019 14:44:44 +0100
Subject: [PATCH 12/27] x86/smap: Ditch __stringify()

Linus noticed all users of __ASM_STAC/__ASM_CLAC are with
__stringify(). Just make them a string.

Suggested-by: Linus Torvalds
Signed-off-by: Peter Zijlstra (Intel)
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/smap.h | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)

diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
index 6cfe43171020..f94a7d0ddd49 100644
--- a/arch/x86/include/asm/smap.h
+++ b/arch/x86/include/asm/smap.h
@@ -13,13 +13,12 @@
 #ifndef _ASM_X86_SMAP_H
 #define _ASM_X86_SMAP_H

-#include
 #include
 #include

 /* "Raw" instruction opcodes */
-#define __ASM_CLAC  .byte 0x0f,0x01,0xca
-#define __ASM_STAC  .byte 0x0f,0x01,0xcb
+#define __ASM_CLAC  ".byte 0x0f,0x01,0xca"
+#define __ASM_STAC  ".byte 0x0f,0x01,0xcb"

 #ifdef __ASSEMBLY__

@@ -28,10 +27,10 @@
 #ifdef CONFIG_X86_SMAP

 #define ASM_CLAC \
-    ALTERNATIVE "", __stringify(__ASM_CLAC), X86_FEATURE_SMAP
+    ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP

 #define ASM_STAC \
-    ALTERNATIVE "", __stringify(__ASM_STAC), X86_FEATURE_SMAP
+    ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP

 #else /* CONFIG_X86_SMAP */

@@ -49,20 +48,20 @@
 static __always_inline void clac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP);
+    alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
 }

 static __always_inline void stac(void)
 {
     /* Note: a barrier is implicit in alternative() */
-    alternative("", __stringify(__ASM_STAC), X86_FEATURE_SMAP);
+    alternative("", __ASM_STAC, X86_FEATURE_SMAP);
 }

 static __always_inline unsigned long smap_save(void)
 {
     unsigned long flags;

-    asm volatile (ALTERNATIVE("", "pushf; pop %0; " __stringify(__ASM_CLAC),
+    asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
                               X86_FEATURE_SMAP)
                   : "=rm" (flags) : : "memory", "cc");

@@ -77,9 +76,9 @@ static __always_inline void smap_restore(unsigned long flags)

 /* These macros can be used in asm() statements */
 #define ASM_CLAC \
-    ALTERNATIVE("", __stringify(__ASM_CLAC), X86_FEATURE_SMAP)
+    ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
 #define ASM_STAC \
-    ALTERNATIVE("", __stringify(__ASM_STAC), X86_FEATURE_SMAP)
+    ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)

 #else /* CONFIG_X86_SMAP */

From 57b78a62e7f23c4686fe54091cdc3d12e60d6513 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 3 Apr 2019 09:39:50 +0200
Subject: [PATCH 13/27] x86/uaccess, kasan: Fix KASAN vs SMAP

KASAN inserts extra code for every LOAD/STORE emitted by the compiler.
Much of this code is simple and safe to run with AC=1, however the
kasan_report() function, called on error, is most certainly not safe
to call with AC=1.

Therefore wrap kasan_report() in user_access_{save,restore}; which for
x86 SMAP, saves/restores EFLAGS and clears AC before calling the real
function.

Also ensure all the functions are without __fentry__ hook. The
function tracer is also not safe.
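The wrapper shape applied below to kasan_report(), in outline (a
sketch, hook name illustrative):

  void report_hook(...)
  {
          unsigned long flags = user_access_save(); /* save EFLAGS.AC, clear AC */

          /* report; may now call arbitrary code, safely, with AC=0 */

          user_access_restore(flags);               /* put the old AC back */
  }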
Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- mm/kasan/Makefile | 3 +++ mm/kasan/common.c | 10 ++++++++++ mm/kasan/report.c | 3 +-- 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 5d1065efbd47..613dfe681e9f 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -2,11 +2,13 @@ KASAN_SANITIZE := n UBSAN_SANITIZE_common.o := n UBSAN_SANITIZE_generic.o := n +UBSAN_SANITIZE_generic_report.o := n UBSAN_SANITIZE_tags.o := n KCOV_INSTRUMENT := n CFLAGS_REMOVE_common.o = -pg CFLAGS_REMOVE_generic.o = -pg +CFLAGS_REMOVE_generic_report.o = -pg CFLAGS_REMOVE_tags.o = -pg # Function splitter causes unnecessary splits in __asan_load1/__asan_store1 @@ -14,6 +16,7 @@ CFLAGS_REMOVE_tags.o = -pg CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) +CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) obj-$(CONFIG_KASAN) := common.o init.o report.o diff --git a/mm/kasan/common.c b/mm/kasan/common.c index 80bbe62b16cd..09c586474511 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -36,6 +36,7 @@ #include #include #include +#include #include "kasan.h" #include "../slab.h" @@ -614,6 +615,15 @@ void kasan_free_shadow(const struct vm_struct *vm) vfree(kasan_mem_to_shadow(vm->addr)); } +extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip); + +void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) +{ + unsigned long flags = user_access_save(); + __kasan_report(addr, size, is_write, ip); + user_access_restore(flags); +} + #ifdef CONFIG_MEMORY_HOTPLUG static bool shadow_mapped(unsigned long addr) { diff --git a/mm/kasan/report.c b/mm/kasan/report.c index ca9418fe9232..0772820ad098 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -281,8 +281,7 @@ void kasan_report_invalid_free(void *object, unsigned long ip) end_report(&flags); } -void kasan_report(unsigned long addr, size_t size, - bool is_write, unsigned long ip) +void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip) { struct kasan_access_info info; void *tagged_addr; From d08965a27e84ca090b504844d50c24fc98587b11 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2019 09:40:16 +0200 Subject: [PATCH 14/27] x86/uaccess, ubsan: Fix UBSAN vs. SMAP UBSAN can insert extra code in random locations; including AC=1 sections. Typically this code is not safe and needs wrapping. So far, only __ubsan_handle_type_mismatch* have been observed in AC=1 sections and therefore only those are annotated. 
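A sketch of how such a call lands in an AC=1 region: any ordinary,
instrumented kernel access between user_access_begin() and
user_access_end() can have a type-mismatch check compiled into it
(hypothetical example):

  if (user_access_begin(uptr, sizeof(*uptr))) {   /* AC=1 */
          v = *kernel_ptr;  /* UBSAN may emit a call to
                             * __ubsan_handle_type_mismatch*() here */
          user_access_end();
  }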
Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Dmitry Vyukov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- lib/Makefile | 1 + lib/ubsan.c | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/lib/Makefile b/lib/Makefile index 3b08673e8881..c4563e7c47ba 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -268,6 +268,7 @@ obj-$(CONFIG_UCS2_STRING) += ucs2_string.o obj-$(CONFIG_UBSAN) += ubsan.o UBSAN_SANITIZE_ubsan.o := n +CFLAGS_ubsan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) obj-$(CONFIG_SBITMAP) += sbitmap.o diff --git a/lib/ubsan.c b/lib/ubsan.c index e4162f59a81c..c8e905bfb627 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "ubsan.h" @@ -313,6 +314,7 @@ static void handle_object_size_mismatch(struct type_mismatch_data_common *data, static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, unsigned long ptr) { + unsigned long flags = user_access_save(); if (!ptr) handle_null_ptr_deref(data); @@ -320,6 +322,8 @@ static void ubsan_type_mismatch_common(struct type_mismatch_data_common *data, handle_misaligned_access(data, ptr); else handle_object_size_mismatch(data, ptr); + + user_access_restore(flags); } void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, From 4a6c91fbdef846ec7250b82f2eeeb87ac5f18cf9 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 7 Mar 2019 11:09:13 +0100 Subject: [PATCH 15/27] x86/uaccess, ftrace: Fix ftrace_likely_update() vs. SMAP For CONFIG_TRACE_BRANCH_PROFILING=y the likely/unlikely things get overloaded and generate callouts to this code, and thus also when AC=1. Make it safe. Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Rostedt Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/trace/trace_branch.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/trace/trace_branch.c b/kernel/trace/trace_branch.c index 4ad967453b6f..3ea65cdff30d 100644 --- a/kernel/trace/trace_branch.c +++ b/kernel/trace/trace_branch.c @@ -205,6 +205,8 @@ void trace_likely_condition(struct ftrace_likely_data *f, int val, int expect) void ftrace_likely_update(struct ftrace_likely_data *f, int val, int expect, int is_constant) { + unsigned long flags = user_access_save(); + /* A constant is always correct */ if (is_constant) { f->constant++; @@ -223,6 +225,8 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, f->data.correct++; else f->data.incorrect++; + + user_access_restore(flags); } EXPORT_SYMBOL(ftrace_likely_update); From 40ea97290b08be2e038b31cbb33097d1145e8169 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 7 Mar 2019 19:54:25 +0100 Subject: [PATCH 16/27] x86/uaccess, kcov: Disable stack protector New tooling noticed this mishap: kernel/kcov.o: warning: objtool: write_comp_data()+0x138: call to __stack_chk_fail() with UACCESS enabled kernel/kcov.o: warning: objtool: __sanitizer_cov_trace_pc()+0xd9: call to __stack_chk_fail() with UACCESS enabled All the other instrumentation (KASAN,UBSAN) also have stack protector disabled. 
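For context, what the stack protector conceptually turns such a hook
into (compiler-generated; sketched in C, guard variable name
illustrative):

  void __sanitizer_cov_trace_pc(void)
  {
          unsigned long canary = __stack_chk_guard;       /* prologue */

          /* ... instrumentation body, possibly running with AC=1 ... */

          if (canary != __stack_chk_guard)
                  __stack_chk_fail();   /* the call objtool flags under UACCESS */
  }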
Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/Makefile b/kernel/Makefile index 6c57e78817da..62471e75a2b0 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -30,6 +30,7 @@ KCOV_INSTRUMENT_extable.o := n # Don't self-instrument. KCOV_INSTRUMENT_kcov.o := n KASAN_SANITIZE_kcov.o := n +CFLAGS_kcov.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) # cond_syscall is currently not LTO compatible CFLAGS_sys_ni.o = $(DISABLE_LTO) From a4d09dde9093a04a9b48fb9e5ef3177bdfaff199 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Feb 2019 10:31:24 +0100 Subject: [PATCH 17/27] objtool: Set insn->func for alternatives In preparation of function attributes, we need each instruction to have a valid link back to its function. Therefore make sure we set the function association for alternative instruction sequences; they are, after all, still part of the function. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- tools/objtool/check.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 110ea3d84772..950d0f62d22b 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -695,6 +695,7 @@ static int handle_group_alt(struct objtool_file *file, last_new_insn = insn; insn->ignore = orig_insn->ignore_alts; + insn->func = orig_insn->func; if (insn->type != INSN_JUMP_CONDITIONAL && insn->type != INSN_JUMP_UNCONDITIONAL) From 09f30d83d33029faf6377a86f5ae80a658af9254 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 28 Feb 2019 14:17:50 +0100 Subject: [PATCH 18/27] objtool: Handle function aliases Function aliases result in different symbols for the same set of instructions; track a canonical symbol so there is a unique point of access. This again prepares the way for function attributes. And in particular the need for aliases comes from how KASAN uses them. 
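A sketch of where such aliases come from; KASAN generates its entry
points along these lines (condensed from the DEFINE_ASAN_LOAD_STORE()
macros in mm/kasan/generic.c):

  void __asan_load1(unsigned long addr)
  {
          check_memory_region(addr, 1, false, _RET_IP_);
  }
  __alias(__asan_load1)
  void __asan_load1_noabort(unsigned long);

Both symbols cover the exact same instructions; the new 'alias' pointer
gives objtool a single canonical symbol to attach state to.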
Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- tools/objtool/elf.c | 15 +++++++++++---- tools/objtool/elf.h | 2 +- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index b8f3cca8e58b..dd198d53387d 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -219,7 +219,7 @@ static int read_sections(struct elf *elf) static int read_symbols(struct elf *elf) { struct section *symtab, *sec; - struct symbol *sym, *pfunc; + struct symbol *sym, *pfunc, *alias; struct list_head *entry, *tmp; int symbols_nr, i; char *coldstr; @@ -239,6 +239,7 @@ static int read_symbols(struct elf *elf) return -1; } memset(sym, 0, sizeof(*sym)); + alias = sym; sym->idx = i; @@ -288,11 +289,17 @@ static int read_symbols(struct elf *elf) break; } - if (sym->offset == s->offset && sym->len >= s->len) { - entry = tmp; - break; + if (sym->offset == s->offset) { + if (sym->len == s->len && alias == sym) + alias = s; + + if (sym->len >= s->len) { + entry = tmp; + break; + } } } + sym->alias = alias; list_add(&sym->list, entry); hash_add(sym->sec->symbol_hash, &sym->hash, sym->idx); } diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h index bc97ed86b9cd..968265b4b4cd 100644 --- a/tools/objtool/elf.h +++ b/tools/objtool/elf.h @@ -61,7 +61,7 @@ struct symbol { unsigned char bind, type; unsigned long offset; unsigned int len; - struct symbol *pfunc, *cfunc; + struct symbol *pfunc, *cfunc, *alias; }; struct rela { From aaf5c623b915d64beba676b8c2e9708d1fda94d6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 27 Feb 2019 14:04:13 +0100 Subject: [PATCH 19/27] objtool: Rewrite add_ignores() The whole add_ignores() thing was wildly weird; rewrite it according to 'modern' ways. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- tools/objtool/check.c | 53 +++++++++++++++++-------------------------- tools/objtool/check.h | 1 - 2 files changed, 21 insertions(+), 33 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 950d0f62d22b..8d8191f25381 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -104,29 +104,6 @@ static struct instruction *next_insn_same_func(struct objtool_file *file, for (insn = next_insn_same_sec(file, insn); insn; \ insn = next_insn_same_sec(file, insn)) -/* - * Check if the function has been manually whitelisted with the - * STACK_FRAME_NON_STANDARD macro, or if it should be automatically whitelisted - * due to its use of a context switching instruction. - */ -static bool ignore_func(struct objtool_file *file, struct symbol *func) -{ - struct rela *rela; - - /* check for STACK_FRAME_NON_STANDARD */ - if (file->whitelist && file->whitelist->rela) - list_for_each_entry(rela, &file->whitelist->rela->rela_list, list) { - if (rela->sym->type == STT_SECTION && - rela->sym->sec == func->sec && - rela->addend == func->offset) - return true; - if (rela->sym->type == STT_FUNC && rela->sym == func) - return true; - } - - return false; -} - /* * This checks to see if the given function is a "noreturn" function. 
* @@ -436,18 +413,31 @@ static void add_ignores(struct objtool_file *file) struct instruction *insn; struct section *sec; struct symbol *func; + struct rela *rela; - for_each_sec(file, sec) { - list_for_each_entry(func, &sec->symbol_list, list) { - if (func->type != STT_FUNC) + sec = find_section_by_name(file->elf, ".rela.discard.func_stack_frame_non_standard"); + if (!sec) + return; + + list_for_each_entry(rela, &sec->rela_list, list) { + switch (rela->sym->type) { + case STT_FUNC: + func = rela->sym; + break; + + case STT_SECTION: + func = find_symbol_by_offset(rela->sym->sec, rela->addend); + if (!func || func->type != STT_FUNC) continue; + break; - if (!ignore_func(file, func)) - continue; - - func_for_each_insn_all(file, func, insn) - insn->ignore = true; + default: + WARN("unexpected relocation symbol type in %s: %d", sec->name, rela->sym->type); + continue; } + + func_for_each_insn_all(file, func, insn) + insn->ignore = true; } } @@ -2199,7 +2189,6 @@ int check(const char *_objname, bool orc) INIT_LIST_HEAD(&file.insn_list); hash_init(file.insn_hash); - file.whitelist = find_section_by_name(file.elf, ".discard.func_stack_frame_non_standard"); file.c_file = find_section_by_name(file.elf, ".comment"); file.ignore_unreachables = no_unreachable; file.hints = false; diff --git a/tools/objtool/check.h b/tools/objtool/check.h index e6e8a655b556..d8896eb43521 100644 --- a/tools/objtool/check.h +++ b/tools/objtool/check.h @@ -60,7 +60,6 @@ struct objtool_file { struct elf *elf; struct list_head insn_list; DECLARE_HASHTABLE(insn_hash, 16); - struct section *whitelist; bool ignore_unreachables, c_file, hints, rodata; }; From 7697eee3ddd768a1fd78c1e687afaa6c5aa5072d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 1 Mar 2019 11:15:49 +0100 Subject: [PATCH 20/27] objtool: Add --backtrace support For when you want to know the path that reached your fail state: $ ./objtool check --no-fp --backtrace arch/x86/lib/usercopy_64.o arch/x86/lib/usercopy_64.o: warning: objtool: .altinstr_replacement+0x3: UACCESS disable without MEMOPs: __clear_user() arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x3a: (alt) arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x2e: (branch) arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x18: (branch) arch/x86/lib/usercopy_64.o: warning: objtool: .altinstr_replacement+0xffffffffffffffff: (branch) arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x5: (alt) arch/x86/lib/usercopy_64.o: warning: objtool: __clear_user()+0x0: <=== (func) 0000000000000000 <__clear_user>: 0: e8 00 00 00 00 callq 5 <__clear_user+0x5> 1: R_X86_64_PLT32 __fentry__-0x4 5: 90 nop 6: 90 nop 7: 90 nop 8: 48 89 f0 mov %rsi,%rax b: 48 c1 ee 03 shr $0x3,%rsi f: 83 e0 07 and $0x7,%eax 12: 48 89 f1 mov %rsi,%rcx 15: 48 85 c9 test %rcx,%rcx 18: 74 0f je 29 <__clear_user+0x29> 1a: 48 c7 07 00 00 00 00 movq $0x0,(%rdi) 21: 48 83 c7 08 add $0x8,%rdi 25: ff c9 dec %ecx 27: 75 f1 jne 1a <__clear_user+0x1a> 29: 48 89 c1 mov %rax,%rcx 2c: 85 c9 test %ecx,%ecx 2e: 74 0a je 3a <__clear_user+0x3a> 30: c6 07 00 movb $0x0,(%rdi) 33: 48 ff c7 inc %rdi 36: ff c9 dec %ecx 38: 75 f6 jne 30 <__clear_user+0x30> 3a: 90 nop 3b: 90 nop 3c: 90 nop 3d: 48 89 c8 mov %rcx,%rax 40: c3 retq Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- tools/objtool/builtin-check.c | 3 ++- tools/objtool/builtin.h | 2 +- tools/objtool/check.c | 18 ++++++++++++++---- 
tools/objtool/warn.h | 8 ++++++++ 4 files changed, 25 insertions(+), 6 deletions(-) diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c index 694abc628e9b..99f10c585cbe 100644 --- a/tools/objtool/builtin-check.c +++ b/tools/objtool/builtin-check.c @@ -29,7 +29,7 @@ #include "builtin.h" #include "check.h" -bool no_fp, no_unreachable, retpoline, module; +bool no_fp, no_unreachable, retpoline, module, backtrace; static const char * const check_usage[] = { "objtool check [] file.o", @@ -41,6 +41,7 @@ const struct option check_options[] = { OPT_BOOLEAN('u', "no-unreachable", &no_unreachable, "Skip 'unreachable instruction' warnings"), OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"), OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"), + OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"), OPT_END(), }; diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h index 28ff40e19a14..65fd3cc3c98b 100644 --- a/tools/objtool/builtin.h +++ b/tools/objtool/builtin.h @@ -20,7 +20,7 @@ #include extern const struct option check_options[]; -extern bool no_fp, no_unreachable, retpoline, module; +extern bool no_fp, no_unreachable, retpoline, module, backtrace; extern int cmd_check(int argc, const char **argv); extern int cmd_orc(int argc, const char **argv); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 8d8191f25381..ccc66af5907f 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1885,8 +1885,11 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, if (!insn->ignore_alts) { list_for_each_entry(alt, &insn->alts, list) { ret = validate_branch(file, alt->insn, state); - if (ret) - return 1; + if (ret) { + if (backtrace) + BT_FUNC("(alt)", insn); + return ret; + } } } @@ -1933,8 +1936,11 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, insn->jump_dest->func->pfunc == func)) { ret = validate_branch(file, insn->jump_dest, state); - if (ret) - return 1; + if (ret) { + if (backtrace) + BT_FUNC("(branch)", insn); + return ret; + } } else if (func && has_modified_stack_frame(&state)) { WARN_FUNC("sibling call from callable instruction with modified stack frame", @@ -2005,6 +2011,8 @@ static int validate_unwind_hints(struct objtool_file *file) for_each_insn(file, insn) { if (insn->hint && !insn->visited) { ret = validate_branch(file, insn, state); + if (ret && backtrace) + BT_FUNC("<=== (hint)", insn); warnings += ret; } } @@ -2133,6 +2141,8 @@ static int validate_functions(struct objtool_file *file) continue; ret = validate_branch(file, insn, state); + if (ret && backtrace) + BT_FUNC("<=== (func)", insn); warnings += ret; } } diff --git a/tools/objtool/warn.h b/tools/objtool/warn.h index afd9f7a05f6d..f4fbb972b611 100644 --- a/tools/objtool/warn.h +++ b/tools/objtool/warn.h @@ -64,6 +64,14 @@ static inline char *offstr(struct section *sec, unsigned long offset) free(_str); \ }) +#define BT_FUNC(format, insn, ...) \ +({ \ + struct instruction *_insn = (insn); \ + char *_str = offstr(_insn->sec, _insn->offset); \ + WARN(" %s: " format, _str, ##__VA_ARGS__); \ + free(_str); \ +}) + #define WARN_ELF(format, ...) 
\
    WARN(format ": %s", ##__VA_ARGS__, elf_errmsg(-1))

From 764eef4b109ae11e6c987de9c14fc7c482041be0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 1 Mar 2019 11:19:03 +0100
Subject: [PATCH 21/27] objtool: Rewrite alt->skip_orig

Really skip the original instruction flow, instead of letting it
continue with NOPs.

Since the alternative code flow already continues after the original
instructions, only the alt-original is skipped.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Cc: Borislav Petkov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 tools/objtool/check.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index ccc66af5907f..5264a305d658 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -31,6 +31,7 @@
 struct alternative {
     struct list_head list;
     struct instruction *insn;
+    bool skip_orig;
 };

 const char *objname;
@@ -623,9 +624,6 @@ static int add_call_destinations(struct objtool_file *file)
  * conditionally jumps to the _end_ of the entry. We have to modify these
  * jumps' destinations to point back to .text rather than the end of the
  * entry in .altinstr_replacement.
- *
- * 4. It has been requested that we don't validate the !POPCNT feature path
- *    which is a "very very small percentage of machines".
  */
 static int handle_group_alt(struct objtool_file *file,
                             struct special_alt *special_alt,
@@ -641,9 +639,6 @@ static int handle_group_alt(struct objtool_file *file,
         if (insn->offset >= special_alt->orig_off + special_alt->orig_len)
             break;

-        if (special_alt->skip_orig)
-            insn->type = INSN_NOP;
-
         insn->alt_group = true;
         last_orig_insn = insn;
     }
@@ -808,6 +803,7 @@ static int add_special_section_alts(struct objtool_file *file)
         }

         alt->insn = new_insn;
+        alt->skip_orig = special_alt->skip_orig;
         list_add_tail(&alt->list, &orig_insn->alts);

         list_del(&special_alt->list);
@@ -1883,7 +1879,12 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
     insn->visited = true;

     if (!insn->ignore_alts) {
+        bool skip_orig = false;
+
         list_for_each_entry(alt, &insn->alts, list) {
+            if (alt->skip_orig)
+                skip_orig = true;
+
             ret = validate_branch(file, alt->insn, state);
             if (ret) {
                 if (backtrace)
@@ -1891,6 +1892,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
                 return ret;
             }
         }
+
+        if (skip_orig)
+            return 0;
     }

     switch (insn->type) {

From 54262aa2830151f89699fa8a6c5aa05f0992e672 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 6 Mar 2019 12:58:15 +0100
Subject: [PATCH 22/27] objtool: Fix sibling call detection

It turned out that we failed to detect some sibling calls;
specifically those without relocation records; like:

  $ ./objdump-func.sh defconfig-build/mm/kasan/generic.o __asan_loadN
  0000 0000000000000840 <__asan_loadN>:
  0000  840: 48 8b 0c 24      mov  (%rsp),%rcx
  0004  844: 31 d2            xor  %edx,%edx
  0006  846: e9 45 fe ff ff   jmpq 690 <check_memory_region>

So extend the cross-function jump to also consider those that are not
between known (or newly detected) parent/child functions, as
sibling-calls when they jump to the start of the function. The second
part of that condition is to deal with random jumps to the middle of
another function, as can be found in arch/x86/lib/copy_user_64.S for
example.
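The pattern that produces such a relocation-less tail-call is a sibling
call to a function in the same section, which the assembler resolves
directly instead of emitting a relocation record (hypothetical names):

  static noinline long check_region(unsigned long addr, size_t size)
  {
          /* ... */
          return 0;
  }

  long asan_loadN(unsigned long addr, size_t size)
  {
          return check_region(addr, size);        /* -O2: a bare "jmp" */
  }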
This then (with later patches applied) makes the above recognise the
sibling call:

  mm/kasan/generic.o: warning: objtool: __asan_loadN()+0x6: call to check_memory_region() with UACCESS enabled

Also make sure to set insn->call_dest for sibling calls so we can know
who we're calling. This is useful information when printing validation
warnings later.

Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Cc: Borislav Petkov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 tools/objtool/check.c | 86 +++++++++++++++++++++++++++----------------
 1 file changed, 55 insertions(+), 31 deletions(-)

diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 5264a305d658..8118361295dd 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -515,7 +515,8 @@ static int add_jump_destinations(struct objtool_file *file)
             continue;
         } else {
             /* sibling call */
-            insn->jump_dest = 0;
+            insn->call_dest = rela->sym;
+            insn->jump_dest = NULL;
             continue;
         }
@@ -537,25 +538,38 @@ static int add_jump_destinations(struct objtool_file *file)
         }

         /*
-         * For GCC 8+, create parent/child links for any cold
-         * subfunctions. This is _mostly_ redundant with a similar
-         * initialization in read_symbols().
-         *
-         * If a function has aliases, we want the *first* such function
-         * in the symbol table to be the subfunction's parent. In that
-         * case we overwrite the initialization done in read_symbols().
-         *
-         * However this code can't completely replace the
-         * read_symbols() code because this doesn't detect the case
-         * where the parent function's only reference to a subfunction
-         * is through a switch table.
+         * Cross-function jump.
          */
         if (insn->func && insn->jump_dest->func &&
-            insn->func != insn->jump_dest->func &&
-            !strstr(insn->func->name, ".cold.") &&
-            strstr(insn->jump_dest->func->name, ".cold.")) {
-            insn->func->cfunc = insn->jump_dest->func;
-            insn->jump_dest->func->pfunc = insn->func;
+            insn->func != insn->jump_dest->func) {
+
+            /*
+             * For GCC 8+, create parent/child links for any cold
+             * subfunctions. This is _mostly_ redundant with a
+             * similar initialization in read_symbols().
+             *
+             * If a function has aliases, we want the *first* such
+             * function in the symbol table to be the subfunction's
+             * parent. In that case we overwrite the
+             * initialization done in read_symbols().
+             *
+             * However this code can't completely replace the
+             * read_symbols() code because this doesn't detect the
+             * case where the parent function's only reference to a
+             * subfunction is through a switch table.
+             */
+            if (!strstr(insn->func->name, ".cold.") &&
+                strstr(insn->jump_dest->func->name, ".cold.")) {
+                insn->func->cfunc = insn->jump_dest->func;
+                insn->jump_dest->func->pfunc = insn->func;
+
+            } else if (insn->jump_dest->func->pfunc != insn->func->pfunc &&
+                       insn->jump_dest->offset == insn->jump_dest->func->offset) {
+
+                /* sibling call */
+                insn->call_dest = insn->jump_dest->func;
+                insn->jump_dest = NULL;
+            }
         }
     }
@@ -1785,6 +1799,17 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
     return false;
 }

+static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
+{
+    if (has_modified_stack_frame(state)) {
+        WARN_FUNC("sibling call from callable instruction with modified stack frame",
+                  insn->sec, insn->offset);
+        return 1;
+    }
+
+    return 0;
+}
+
 /*
  * Follow the branch starting at the given instruction, and recursively follow
  * any other branches (jumps).  Meanwhile, track the frame pointer state at
Meanwhile, track the frame pointer state at @@ -1935,9 +1960,14 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, case INSN_JUMP_CONDITIONAL: case INSN_JUMP_UNCONDITIONAL: - if (insn->jump_dest && - (!func || !insn->jump_dest->func || - insn->jump_dest->func->pfunc == func)) { + if (func && !insn->jump_dest) { + ret = validate_sibling_call(insn, &state); + if (ret) + return ret; + + } else if (insn->jump_dest && + (!func || !insn->jump_dest->func || + insn->jump_dest->func->pfunc == func)) { ret = validate_branch(file, insn->jump_dest, state); if (ret) { @@ -1945,11 +1975,6 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, BT_FUNC("(branch)", insn); return ret; } - - } else if (func && has_modified_stack_frame(&state)) { - WARN_FUNC("sibling call from callable instruction with modified stack frame", - sec, insn->offset); - return 1; } if (insn->type == INSN_JUMP_UNCONDITIONAL) @@ -1958,11 +1983,10 @@ static int validate_branch(struct objtool_file *file, struct instruction *first, break; case INSN_JUMP_DYNAMIC: - if (func && list_empty(&insn->alts) && - has_modified_stack_frame(&state)) { - WARN_FUNC("sibling call from callable instruction with modified stack frame", - sec, insn->offset); - return 1; + if (func && list_empty(&insn->alts)) { + ret = validate_sibling_call(insn, &state); + if (ret) + return ret; } return 0; From ea24213d8088f9da73e1b6aadf7abd2435b70397 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 25 Feb 2019 12:50:09 +0100 Subject: [PATCH 23/27] objtool: Add UACCESS validation It is important that UACCESS regions are as small as possible; furthermore the UACCESS state is not scheduled, so doing anything that might directly call into the scheduler will cause random code to be ran with UACCESS enabled. Teach objtool too track UACCESS state and warn about any CALL made while UACCESS is enabled. This very much includes the __fentry__() and __preempt_schedule() calls. Note that exceptions _do_ save/restore the UACCESS state, and therefore they can drive preemption. This also means that all exception handlers must have an otherwise redundant UACCESS disable instruction; therefore ignore this warning for !STT_FUNC code (exception handlers are not normal functions). 
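To make the rule concrete, here is a rough stand-alone model of the
check (an illustrative sketch only, not objtool code; the instruction
encoding and the messages are simplified):

  #include <stdio.h>

  enum toy_insn { TOY_STAC, TOY_CLAC, TOY_CALL, TOY_RET };

  /* Track AC across a straight-line stream and flag the bad patterns. */
  static int toy_validate(const enum toy_insn *in, int n)
  {
  	int uaccess = 0, bad = 0;

  	for (int i = 0; i < n; i++) {
  		switch (in[i]) {
  		case TOY_STAC:
  			if (uaccess) {
  				printf("+%d: recursive UACCESS enable\n", i);
  				bad = 1;
  			}
  			uaccess = 1;
  			break;
  		case TOY_CLAC:
  			if (!uaccess) {
  				printf("+%d: redundant UACCESS disable\n", i);
  				bad = 1;
  			}
  			uaccess = 0;
  			break;
  		case TOY_CALL:
  			if (uaccess) {	/* the rule this patch enforces */
  				printf("+%d: call with UACCESS enabled\n", i);
  				bad = 1;
  			}
  			break;
  		case TOY_RET:
  			if (uaccess) {
  				printf("+%d: return with UACCESS enabled\n", i);
  				bad = 1;
  			}
  			break;
  		}
  	}
  	return bad;
  }

  int main(void)
  {
  	/* STAC; CALL; CLAC; RET -- the CALL must be flagged. */
  	const enum toy_insn stream[] = { TOY_STAC, TOY_CALL, TOY_CLAC, TOY_RET };
  	return toy_validate(stream, 4);
  }

objtool performs the same walk over the real decoded instruction
stream, additionally following branches and alternatives and, per the
diff below, saving/restoring the state across PUSHF/POPF.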
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Cc: Borislav Petkov
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 scripts/Makefile.build          |   3 +
 tools/objtool/arch.h            |   6 +-
 tools/objtool/arch/x86/decode.c |  13 ++-
 tools/objtool/builtin-check.c   |   3 +-
 tools/objtool/builtin.h         |   2 +-
 tools/objtool/check.c           | 199 +++++++++++++++++++++++++++++---
 tools/objtool/check.h           |   3 +-
 tools/objtool/elf.h             |   1 +
 tools/objtool/special.c         |  18 +++
 tools/objtool/special.h         |   1 +
 10 files changed, 227 insertions(+), 22 deletions(-)

diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 76ca30cc4791..0c5969fa795f 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -222,6 +222,9 @@ endif
 ifdef CONFIG_RETPOLINE
   objtool_args += --retpoline
 endif
+ifdef CONFIG_X86_SMAP
+  objtool_args += --uaccess
+endif
 
 # 'OBJECT_FILES_NON_STANDARD := y': skip objtool checking for a directory
 # 'OBJECT_FILES_NON_STANDARD_foo.o := 'y': skip objtool checking for a file
diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index b0d7dc3d71b5..467c2fe798a9 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -33,7 +33,9 @@
 #define INSN_STACK		8
 #define INSN_BUG		9
 #define INSN_NOP		10
-#define INSN_OTHER		11
+#define INSN_STAC		11
+#define INSN_CLAC		12
+#define INSN_OTHER		13
 #define INSN_LAST		INSN_OTHER
 
 enum op_dest_type {
@@ -41,6 +43,7 @@ enum op_dest_type {
 	OP_DEST_REG_INDIRECT,
 	OP_DEST_MEM,
 	OP_DEST_PUSH,
+	OP_DEST_PUSHF,
 	OP_DEST_LEAVE,
 };
 
@@ -55,6 +58,7 @@ enum op_src_type {
 	OP_SRC_REG_INDIRECT,
 	OP_SRC_CONST,
 	OP_SRC_POP,
+	OP_SRC_POPF,
 	OP_SRC_ADD,
 	OP_SRC_AND,
 };
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 540a209b78ab..ab20a96fee50 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -357,19 +357,26 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		/* pushf */
 		*type = INSN_STACK;
 		op->src.type = OP_SRC_CONST;
-		op->dest.type = OP_DEST_PUSH;
+		op->dest.type = OP_DEST_PUSHF;
 		break;
 
 	case 0x9d:
 		/* popf */
 		*type = INSN_STACK;
-		op->src.type = OP_SRC_POP;
+		op->src.type = OP_SRC_POPF;
 		op->dest.type = OP_DEST_MEM;
 		break;
 
 	case 0x0f:
-		if (op2 >= 0x80 && op2 <= 0x8f) {
+		if (op2 == 0x01) {
+
+			if (modrm == 0xca)
+				*type = INSN_CLAC;
+			else if (modrm == 0xcb)
+				*type = INSN_STAC;
+
+		} else if (op2 >= 0x80 && op2 <= 0x8f) {
+
 			*type = INSN_JUMP_CONDITIONAL;
diff --git a/tools/objtool/builtin-check.c b/tools/objtool/builtin-check.c
index 99f10c585cbe..f3b378126011 100644
--- a/tools/objtool/builtin-check.c
+++ b/tools/objtool/builtin-check.c
@@ -29,7 +29,7 @@
 #include "builtin.h"
 #include "check.h"
 
-bool no_fp, no_unreachable, retpoline, module, backtrace;
+bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 static const char * const check_usage[] = {
 	"objtool check [<options>] file.o",
@@ -42,6 +42,7 @@ const struct option check_options[] = {
 	OPT_BOOLEAN('r', "retpoline", &retpoline, "Validate retpoline assumptions"),
 	OPT_BOOLEAN('m', "module", &module, "Indicates the object will be part of a kernel module"),
 	OPT_BOOLEAN('b', "backtrace", &backtrace, "unwind on error"),
+	OPT_BOOLEAN('a', "uaccess", &uaccess, "enable uaccess checking"),
 	OPT_END(),
 };
diff --git a/tools/objtool/builtin.h b/tools/objtool/builtin.h
index 65fd3cc3c98b..69762f9c5602 100644
--- a/tools/objtool/builtin.h
+++ b/tools/objtool/builtin.h
@@ -20,7 +20,7 @@
 #include <subcmd/parse-options.h>
 
 extern const struct option check_options[];
-extern bool no_fp, no_unreachable, retpoline, module, backtrace;
+extern bool no_fp, no_unreachable, retpoline, module, backtrace, uaccess;
 
 extern int cmd_check(int argc, const char **argv);
 extern int cmd_orc(int argc, const char **argv);
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 8118361295dd..965e954e07f4 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -442,6 +442,82 @@ static void add_ignores(struct objtool_file *file)
 	}
 }
 
+/*
+ * This is a whitelist of functions that is allowed to be called with AC set.
+ * The list is meant to be minimal and only contains compiler instrumentation
+ * ABI and a few functions used to implement *_{to,from}_user() functions.
+ *
+ * These functions must not directly change AC, but may PUSHF/POPF.
+ */
+static const char *uaccess_safe_builtin[] = {
+	/* KASAN */
+	"kasan_report",
+	"check_memory_region",
+	/* KASAN out-of-line */
+	"__asan_loadN_noabort",
+	"__asan_load1_noabort",
+	"__asan_load2_noabort",
+	"__asan_load4_noabort",
+	"__asan_load8_noabort",
+	"__asan_load16_noabort",
+	"__asan_storeN_noabort",
+	"__asan_store1_noabort",
+	"__asan_store2_noabort",
+	"__asan_store4_noabort",
+	"__asan_store8_noabort",
+	"__asan_store16_noabort",
+	/* KASAN in-line */
+	"__asan_report_load_n_noabort",
+	"__asan_report_load1_noabort",
+	"__asan_report_load2_noabort",
+	"__asan_report_load4_noabort",
+	"__asan_report_load8_noabort",
+	"__asan_report_load16_noabort",
+	"__asan_report_store_n_noabort",
+	"__asan_report_store1_noabort",
+	"__asan_report_store2_noabort",
+	"__asan_report_store4_noabort",
+	"__asan_report_store8_noabort",
+	"__asan_report_store16_noabort",
+	/* KCOV */
+	"write_comp_data",
+	"__sanitizer_cov_trace_pc",
+	"__sanitizer_cov_trace_const_cmp1",
+	"__sanitizer_cov_trace_const_cmp2",
+	"__sanitizer_cov_trace_const_cmp4",
+	"__sanitizer_cov_trace_const_cmp8",
+	"__sanitizer_cov_trace_cmp1",
+	"__sanitizer_cov_trace_cmp2",
+	"__sanitizer_cov_trace_cmp4",
+	"__sanitizer_cov_trace_cmp8",
+	/* UBSAN */
+	"ubsan_type_mismatch_common",
+	"__ubsan_handle_type_mismatch",
+	"__ubsan_handle_type_mismatch_v1",
+	/* misc */
+	"csum_partial_copy_generic",
+	"__memcpy_mcsafe",
+	"ftrace_likely_update", /* CONFIG_TRACE_BRANCH_PROFILING */
+	NULL
+};
+
+static void add_uaccess_safe(struct objtool_file *file)
+{
+	struct symbol *func;
+	const char **name;
+
+	if (!uaccess)
+		return;
+
+	for (name = uaccess_safe_builtin; *name; name++) {
+		func = find_symbol_by_name(file->elf, *name);
+		if (!func)
+			continue;
+
+		func->alias->uaccess_safe = true;
+	}
+}
+
 /*
  * FIXME: For now, just ignore any alternatives which add retpolines. This is
  * a temporary hack, as it doesn't allow ORC to unwind from inside a retpoline.
@@ -818,6 +894,7 @@ static int add_special_section_alts(struct objtool_file *file)
 		}
 
 		alt->insn = new_insn;
		alt->skip_orig = special_alt->skip_orig;
+		orig_insn->ignore_alts |= special_alt->skip_alt;
 		list_add_tail(&alt->list, &orig_insn->alts);
 
 		list_del(&special_alt->list);
@@ -1239,6 +1316,7 @@ static int decode_sections(struct objtool_file *file)
 		return ret;
 
 	add_ignores(file);
+	add_uaccess_safe(file);
 
 	ret = add_ignore_alternatives(file);
 	if (ret)
@@ -1320,11 +1398,11 @@ static int update_insn_state_regs(struct instruction *insn, struct insn_state *s
 		return 0;
 
 	/* push */
-	if (op->dest.type == OP_DEST_PUSH)
+	if (op->dest.type == OP_DEST_PUSH || op->dest.type == OP_DEST_PUSHF)
 		cfa->offset += 8;
 
 	/* pop */
-	if (op->src.type == OP_SRC_POP)
+	if (op->src.type == OP_SRC_POP || op->src.type == OP_SRC_POPF)
 		cfa->offset -= 8;
 
 	/* add immediate to sp */
@@ -1581,6 +1659,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_SRC_POP:
+		case OP_SRC_POPF:
 			if (!state->drap && op->dest.type == OP_DEST_REG &&
 			    op->dest.reg == cfa->base) {
@@ -1645,6 +1724,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_DEST_PUSH:
+		case OP_DEST_PUSHF:
 			state->stack_size += 8;
 			if (cfa->base == CFI_SP)
 				cfa->offset += 8;
@@ -1735,7 +1815,7 @@ static int update_insn_state(struct instruction *insn, struct insn_state *state)
 			break;
 
 		case OP_DEST_MEM:
-			if (op->src.type != OP_SRC_POP) {
+			if (op->src.type != OP_SRC_POP && op->src.type != OP_SRC_POPF) {
 				WARN_FUNC("unknown stack-related memory operation",
 					  insn->sec, insn->offset);
 				return -1;
@@ -1799,6 +1879,33 @@ static bool insn_state_match(struct instruction *insn, struct insn_state *state)
 	return false;
 }
 
+static inline bool func_uaccess_safe(struct symbol *func)
+{
+	if (func)
+		return func->alias->uaccess_safe;
+
+	return false;
+}
+
+static inline const char *insn_dest_name(struct instruction *insn)
+{
+	if (insn->call_dest)
+		return insn->call_dest->name;
+
+	return "{dynamic}";
+}
+
+static int validate_call(struct instruction *insn, struct insn_state *state)
+{
+	if (state->uaccess && !func_uaccess_safe(insn->call_dest)) {
+		WARN_FUNC("call to %s() with UACCESS enabled",
+			  insn->sec, insn->offset, insn_dest_name(insn));
+		return 1;
+	}
+
+	return 0;
+}
+
 static int validate_sibling_call(struct instruction *insn, struct insn_state *state)
 {
 	if (has_modified_stack_frame(state)) {
@@ -1807,7 +1914,7 @@ static int validate_sibling_call(struct instruction *insn, struct insn_state *st
 		return 1;
 	}
 
-	return 0;
+	return validate_call(insn, state);
 }
 
/*
@@ -1855,7 +1962,9 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			if (!insn->hint && !insn_state_match(insn, &state))
 				return 1;
 
-			return 0;
+			/* If we were here with AC=0, but now have AC=1, go again */
+			if (insn->state.uaccess || !state.uaccess)
+				return 0;
 		}
 
 		if (insn->hint) {
@@ -1925,6 +2034,16 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 		switch (insn->type) {
 
 		case INSN_RETURN:
+			if (state.uaccess && !func_uaccess_safe(func)) {
+				WARN_FUNC("return with UACCESS enabled", sec, insn->offset);
+				return 1;
+			}
+
+			if (!state.uaccess && func_uaccess_safe(func)) {
+				WARN_FUNC("return with UACCESS disabled from a UACCESS-safe function", sec, insn->offset);
+				return 1;
+			}
+
 			if (func && has_modified_stack_frame(&state)) {
 				WARN_FUNC("return with modified stack frame",
 					  sec, insn->offset);
@@ -1940,17 +2059,22 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			return 0;
 
 		case INSN_CALL:
-			if (is_fentry_call(insn))
-				break;
-
-			ret = dead_end_function(file, insn->call_dest);
-			if (ret == 1)
-				return 0;
-			if (ret == -1)
-				return 1;
-
-			/* fallthrough */
 		case INSN_CALL_DYNAMIC:
+			ret = validate_call(insn, &state);
+			if (ret)
+				return ret;
+
+			if (insn->type == INSN_CALL) {
+				if (is_fentry_call(insn))
+					break;
+
+				ret = dead_end_function(file, insn->call_dest);
+				if (ret == 1)
+					return 0;
+				if (ret == -1)
+					return 1;
+			}
+
 			if (!no_fp && func && !has_valid_stack_frame(&state)) {
 				WARN_FUNC("call without frame pointer save/setup",
 					  sec, insn->offset);
@@ -2003,6 +2127,49 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			if (update_insn_state(insn, &state))
 				return 1;
 
+			if (insn->stack_op.dest.type == OP_DEST_PUSHF) {
+				if (!state.uaccess_stack) {
+					state.uaccess_stack = 1;
+				} else if (state.uaccess_stack >> 31) {
+					WARN_FUNC("PUSHF stack exhausted", sec, insn->offset);
+					return 1;
+				}
+				state.uaccess_stack <<= 1;
+				state.uaccess_stack |= state.uaccess;
+			}
+
+			if (insn->stack_op.src.type == OP_SRC_POPF) {
+				if (state.uaccess_stack) {
+					state.uaccess = state.uaccess_stack & 1;
+					state.uaccess_stack >>= 1;
+					if (state.uaccess_stack == 1)
+						state.uaccess_stack = 0;
+				}
+			}
+
+			break;
+
+		case INSN_STAC:
+			if (state.uaccess) {
+				WARN_FUNC("recursive UACCESS enable", sec, insn->offset);
+				return 1;
+			}
+
+			state.uaccess = true;
+			break;
+
+		case INSN_CLAC:
+			if (!state.uaccess && insn->func) {
+				WARN_FUNC("redundant UACCESS disable", sec, insn->offset);
+				return 1;
+			}
+
+			if (func_uaccess_safe(func) && !state.uaccess_stack) {
+				WARN_FUNC("UACCESS-safe disables UACCESS", sec, insn->offset);
+				return 1;
+			}
+
+			state.uaccess = false;
 			break;
 
 		default:
			break;
		}
@@ -2168,6 +2335,8 @@ static int validate_functions(struct objtool_file *file)
 			if (!insn || insn->ignore)
 				continue;
 
+			state.uaccess = func->alias->uaccess_safe;
+
 			ret = validate_branch(file, insn, state);
 			if (ret && backtrace)
 				BT_FUNC("<=== (func)", insn);
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index d8896eb43521..78a95d06c165 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,8 @@ struct insn_state {
 	int stack_size;
 	unsigned char type;
 	bool bp_scratch;
-	bool drap, end;
+	bool drap, end, uaccess;
+	unsigned int uaccess_stack;
 	int drap_reg, drap_offset;
 	struct cfi_reg vals[CFI_NUM_REGS];
 };
diff --git a/tools/objtool/elf.h b/tools/objtool/elf.h
index 968265b4b4cd..2cc2ed49322d 100644
--- a/tools/objtool/elf.h
+++ b/tools/objtool/elf.h
@@ -62,6 +62,7 @@ struct symbol {
 	unsigned long offset;
 	unsigned int len;
 	struct symbol *pfunc, *cfunc, *alias;
+	bool uaccess_safe;
 };
 
 struct rela {
diff --git a/tools/objtool/special.c b/tools/objtool/special.c
index 50af4e1274b3..4e50563d87c6 100644
--- a/tools/objtool/special.c
+++ b/tools/objtool/special.c
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <string.h>
 
+#include "builtin.h"
 #include "special.h"
 #include "warn.h"
@@ -42,6 +43,7 @@
 #define ALT_NEW_LEN_OFFSET	11
 
 #define X86_FEATURE_POPCNT	(4*32+23)
+#define X86_FEATURE_SMAP	(9*32+20)
 
 struct special_entry {
 	const char *sec;
@@ -110,6 +112,22 @@ static int get_alt_entry(struct elf *elf, struct special_entry *entry,
 		 */
 		if (feature == X86_FEATURE_POPCNT)
 			alt->skip_orig = true;
+
+		/*
+		 * If UACCESS validation is enabled; force that alternative;
+		 * otherwise force it the other way.
+		 *
+		 * What we want to avoid is having both the original and the
+		 * alternative code flow at the same time, in that case we can
+		 * find paths that see the STAC but take the NOP instead of
+		 * CLAC and the other way around.
+		 */
+		if (feature == X86_FEATURE_SMAP) {
+			if (uaccess)
+				alt->skip_orig = true;
+			else
+				alt->skip_alt = true;
+		}
 	}
 
 	orig_rela = find_rela_by_dest(sec, offset + entry->orig);
diff --git a/tools/objtool/special.h b/tools/objtool/special.h
index fad1d092f679..d5c062e718ef 100644
--- a/tools/objtool/special.h
+++ b/tools/objtool/special.h
@@ -26,6 +26,7 @@ struct special_alt {
 
 	bool group;
 	bool skip_orig;
+	bool skip_alt;
 	bool jump_or_nop;
 
 	struct section *orig_sec;

From 2f0f9e9ad7b3459c5c54ef2c03145a98e65dd158 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Mon, 25 Feb 2019 11:10:55 +0100
Subject: [PATCH 24/27] objtool: Add Direction Flag validation

Having DF escape is BAD(tm).

Linus, you suggested this one, but since DF really is only used from
ASM and the failure case is fairly obvious, do we really need this?

OTOH the patch is fairly small and simple, so let's just do this to
demonstrate objtool's superior awesomeness.

Suggested-by: Linus Torvalds
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Josh Poimboeuf
Cc: Borislav Petkov
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 tools/objtool/arch.h            |  4 +++-
 tools/objtool/arch/x86/decode.c |  8 ++++++++
 tools/objtool/check.c           | 25 +++++++++++++++++++++++++
 tools/objtool/check.h           |  2 +-
 4 files changed, 37 insertions(+), 2 deletions(-)

diff --git a/tools/objtool/arch.h b/tools/objtool/arch.h
index 467c2fe798a9..7a111a77b7aa 100644
--- a/tools/objtool/arch.h
+++ b/tools/objtool/arch.h
@@ -35,7 +35,9 @@
 #define INSN_NOP		10
 #define INSN_STAC		11
 #define INSN_CLAC		12
-#define INSN_OTHER		13
+#define INSN_STD		13
+#define INSN_CLD		14
+#define INSN_OTHER		15
 #define INSN_LAST		INSN_OTHER
 
 enum op_dest_type {
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index ab20a96fee50..472e991f6512 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -451,6 +451,14 @@ int arch_decode_instruction(struct elf *elf, struct section *sec,
 		*type = INSN_CALL;
 		break;
 
+	case 0xfc:
+		*type = INSN_CLD;
+		break;
+
+	case 0xfd:
+		*type = INSN_STD;
+		break;
+
 	case 0xff:
 		if (modrm_reg == 2 || modrm_reg == 3)
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 965e954e07f4..38b0517dc49e 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -1903,6 +1903,12 @@ static int validate_call(struct instruction *insn, struct insn_state *state)
 		return 1;
 	}
 
+	if (state->df) {
+		WARN_FUNC("call to %s() with DF set",
+			  insn->sec, insn->offset, insn_dest_name(insn));
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -2044,6 +2050,11 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 				return 1;
 			}
 
+			if (state.df) {
+				WARN_FUNC("return with DF set", sec, insn->offset);
+				return 1;
+			}
+
 			if (func && has_modified_stack_frame(&state)) {
 				WARN_FUNC("return with modified stack frame",
 					  sec, insn->offset);
@@ -2172,6 +2183,20 @@ static int validate_branch(struct objtool_file *file, struct instruction *first,
 			state.uaccess = false;
 			break;
 
+		case INSN_STD:
+			if (state.df)
+				WARN_FUNC("recursive STD", sec, insn->offset);
+
+			state.df = true;
+			break;
+
+		case INSN_CLD:
+			if (!state.df && insn->func)
+				WARN_FUNC("redundant CLD", sec, insn->offset);
+
+			state.df = false;
+			break;
+
 		default:
 			break;
 		}
diff --git a/tools/objtool/check.h b/tools/objtool/check.h
index 78a95d06c165..71e54f97dbcd 100644
--- a/tools/objtool/check.h
+++ b/tools/objtool/check.h
@@ -31,7 +31,7 @@ struct insn_state {
 	int stack_size;
 	unsigned char type;
 	bool bp_scratch;
-	bool drap, end, uaccess;
+	bool drap, end, uaccess, df;
 	unsigned int uaccess_stack;
 	int drap_reg, drap_offset;
 	struct cfi_reg vals[CFI_NUM_REGS];

From 64604d54d3115fee89598bfb6d8d2252f8a2d114 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Tue, 19 Mar 2019 11:35:46 +0100
Subject: [PATCH 25/27] sched/x86_64: Don't save flags on context switch

Now that we have objtool validating AC=1 state for all x86_64 code,
we can once again guarantee clean flags on schedule.

Signed-off-by: Peter Zijlstra (Intel)
Cc: Borislav Petkov
Cc: Josh Poimboeuf
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Signed-off-by: Ingo Molnar
---
 arch/x86/entry/entry_64.S        | 2 --
 arch/x86/include/asm/switch_to.h | 2 +-
 arch/x86/kernel/process_64.c     | 7 -------
 3 files changed, 1 insertion(+), 10 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 4fe27b67d7e2..1f0efdb7b629 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -291,7 +291,6 @@ ENTRY(__switch_to_asm)
 	pushq	%r13
 	pushq	%r14
 	pushq	%r15
-	pushfq
 
 	/* switch stack */
 	movq	%rsp, TASK_threadsp(%rdi)
@@ -314,7 +313,6 @@ ENTRY(__switch_to_asm)
 #endif
 
 	/* restore callee-saved registers */
-	popfq
 	popq	%r15
 	popq	%r14
 	popq	%r13
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 157149d4129c..18a4b6890fa8 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -40,13 +40,13 @@ asmlinkage void ret_from_fork(void);
 * order of the fields must match the code in __switch_to_asm().
 */
 struct inactive_task_frame {
-	unsigned long flags;
 #ifdef CONFIG_X86_64
 	unsigned long r15;
 	unsigned long r14;
 	unsigned long r13;
 	unsigned long r12;
 #else
+	unsigned long flags;
 	unsigned long si;
 	unsigned long di;
 #endif
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 026a43be9bd1..844a28b29967 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -393,13 +393,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
 
-	/*
-	 * For a new task use the RESET flags value since there is no before.
-	 * All the status flags are zero; DF and all the system flags must also
-	 * be 0, specifically IF must be 0 because we context switch to the new
-	 * task with interrupts disabled.
-	 */
-	frame->flags = X86_EFLAGS_FIXED;
 	frame->bp = 0;
 	frame->ret_addr = (unsigned long) ret_from_fork;
 	p->thread.sp = (unsigned long) fork_frame;

From 6ae865615fc43d014da2fd1f1bba7e81ee622d1b Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 24 Apr 2019 09:19:24 +0200
Subject: [PATCH 26/27] x86/uaccess: Don't leak the AC flag into __put_user()
 argument evaluation

The __put_user() macro evaluates its @ptr argument inside the
__uaccess_begin() / __uaccess_end() region. While this would normally
not be expected to be an issue, a UBSAN bug (it ignored -fwrapv,
fixed in GCC 8+) would transform the @ptr evaluation for:

  drivers/gpu/drm/i915/i915_gem_execbuffer.c:

	if (unlikely(__put_user(offset, &urelocs[r-stack].presumed_offset))) {

into a signed-overflow-UB check and trigger the objtool AC validation.

Finish this commit:

  2a418cf3f5f1 ("x86/uaccess: Don't leak the AC flag into __put_user() value evaluation")

and explicitly evaluate all 3 arguments early.
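The fix follows the usual macro-hygiene pattern: snapshot every
argument into a local before the critical region, so no argument
expression (and no compiler instrumentation of it) is evaluated between
__uaccess_begin() and __uaccess_end(). A stand-alone illustration of
the pattern (plain GNU C, not the kernel macro; CRITICAL_STORE() is a
made-up name):

  #include <stdio.h>

  /*
   * Made-up macro demonstrating the shape of the fix: snapshot all
   * arguments into locals up front; only the locals are used inside
   * the "critical region", so argument expressions cannot leak code
   * into it.
   */
  #define CRITICAL_STORE(x, ptr)					\
  ({								\
  	__typeof__(*(ptr)) __val = (x);	/* evaluated early */	\
  	__typeof__(ptr) __ptr = (ptr);	/* evaluated early */	\
  	/* --- critical region begins --- */			\
  	*__ptr = __val;						\
  	/* --- critical region ends --- */			\
  	0;							\
  })

  int main(void)
  {
  	int buf[2] = { 0, 0 };
  	int i = 0;

  	/* The i++ side effect runs before the critical region. */
  	CRITICAL_STORE(42, &buf[i++]);
  	printf("%d %d\n", buf[0], i);	/* prints: 42 1 */
  	return 0;
  }

With all three arguments snapshotted, the only code left between
__uaccess_begin() and __uaccess_end() is the access itself.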
Reported-by: Randy Dunlap
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Randy Dunlap # build-tested
Acked-by: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: luto@kernel.org
Fixes: 2a418cf3f5f1 ("x86/uaccess: Don't leak the AC flag into __put_user() value evaluation")
Link: http://lkml.kernel.org/r/20190424072208.695962771@infradead.org
Signed-off-by: Ingo Molnar
---
 arch/x86/include/asm/uaccess.h | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 5ca7b91faf67..bb21913885a3 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -427,10 +427,11 @@ do {									\
 ({									\
 	__label__ __pu_label;						\
 	int __pu_err = -EFAULT;						\
-	__typeof__(*(ptr)) __pu_val;					\
-	__pu_val = x;							\
+	__typeof__(*(ptr)) __pu_val = (x);				\
+	__typeof__(ptr) __pu_ptr = (ptr);				\
+	__typeof__(size) __pu_size = (size);				\
 	__uaccess_begin();						\
-	__put_user_size(__pu_val, (ptr), (size), __pu_label);		\
+	__put_user_size(__pu_val, __pu_ptr, __pu_size, __pu_label);	\
 	__pu_err = 0;							\
 __pu_label:								\
 	__uaccess_end();						\

From 29da93fea3ea39ab9b12270cc6be1b70ef201c9e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Wed, 24 Apr 2019 09:19:25 +0200
Subject: [PATCH 27/27] mm/uaccess: Use 'unsigned long' to placate UBSAN
 warnings on older GCC versions

Randy reported objtool triggered on his (GCC-7.4) build:

  lib/strncpy_from_user.o: warning: objtool: strncpy_from_user()+0x315: call to __ubsan_handle_add_overflow() with UACCESS enabled
  lib/strnlen_user.o: warning: objtool: strnlen_user()+0x337: call to __ubsan_handle_sub_overflow() with UACCESS enabled

This is due to UBSAN generating signed-overflow-UB warnings where it
should not. Prior to GCC-8 UBSAN ignored -fwrapv (which the kernel
uses through -fno-strict-overflow).

Make the functions use 'unsigned long' throughout.

Reported-by: Randy Dunlap
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: Randy Dunlap # build-tested
Acked-by: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Cc: luto@kernel.org
Link: http://lkml.kernel.org/r/20190424072208.754094071@infradead.org
Signed-off-by: Ingo Molnar
---
 lib/strncpy_from_user.c | 5 +++--
 lib/strnlen_user.c      | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index 58eacd41526c..023ba9f3b99f 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -23,10 +23,11 @@
 * hit it), 'max' is the address space maximum (and we return
 * -EFAULT if we hit it).
 */
-static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
+static inline long do_strncpy_from_user(char *dst, const char __user *src,
+					unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long res = 0;
+	unsigned long res = 0;
 
 	/*
 	 * Truncate 'max' to the user-specified limit, so that
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
index 1c1a1b0e38a5..7f2db3fe311f 100644
--- a/lib/strnlen_user.c
+++ b/lib/strnlen_user.c
@@ -28,7 +28,7 @@
 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
 {
 	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
-	long align, res = 0;
+	unsigned long align, res = 0;
 	unsigned long c;
 
 	/*
@@ -42,7 +42,7 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
 	 * Do everything aligned. But that means that we
 	 * need to also expand the maximum..
 	 */
-	align = (sizeof(long) - 1) & (unsigned long)src;
+	align = (sizeof(unsigned long) - 1) & (unsigned long)src;
 	src -= align;
 	max += align;
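For reference, the reason 'unsigned long' placates UBSAN: only signed
arithmetic is a candidate for overflow instrumentation; unsigned
arithmetic wraps by definition and generates no check. A stand-alone
sketch of the difference (demo.c is a hypothetical file name; the point
is that pre-GCC-8 UBSAN instrumented the signed case even under
-fwrapv):

  /* Build with: gcc -fwrapv -fsanitize=signed-integer-overflow demo.c */
  #include <stdio.h>

  int main(void)
  {
  	long s = 0;
  	unsigned long u = 0;

  	s = s - 1;	/* signed: a candidate for __ubsan_handle_sub_overflow() */
  	u = u - 1;	/* unsigned: defined wrap to ULONG_MAX, never instrumented */

  	printf("%ld %lu\n", s, u);
  	return 0;
  }

The instrumented check in the signed case is what appeared inside the
UACCESS region and triggered the objtool warnings above.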