mirror of https://github.com/edk2-porting/linux-next.git
synced 2024-12-23 04:34:11 +08:00
1ff865e343
As reported by objtool:

  lib/ubsan.o: warning: objtool: .altinstr_replacement+0x0: alternative modifies stack
  lib/ubsan.o: warning: objtool: .altinstr_replacement+0x7: alternative modifies stack

the smap_{save,restore}() alternatives violate the (newly enforced) rule on stack invariance: because there is only a single ORC table, it must be valid for every alternative. These alternatives break that rule, with the direct result that unwinds will not be correct when they hit between the PUSH and POP instructions. Rewrite the functions to only have a conditional jump.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Miroslav Benes <mbenes@suse.cz>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20200429101802.GI13592@hirez.programming.kicks-ass.net
100 lines
2.1 KiB
C
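For context, the pre-patch smap_save() placed the PUSHF/POP pair inside the alternative itself, so the stack layout differed depending on which alternative was patched in, and the single ORC table could not describe both. A reconstruction of that earlier, problematic shape (inferred from the commit message above; not necessarily the exact pre-patch source) looks like:

	static __always_inline unsigned long smap_save(void)
	{
		unsigned long flags;

		/*
		 * BAD: the replacement text pushes and pops, so the two
		 * alternatives leave the stack in different states and an
		 * unwind that hits between the PUSH and the POP goes wrong.
		 */
		asm volatile (ALTERNATIVE("", "pushf; pop %0; " __ASM_CLAC,
					  X86_FEATURE_SMAP)
			      : "=rm" (flags) : : "memory", "cc");

		return flags;
	}

The file below is the fixed version: only a conditional jump is patched, and the PUSHF/POP always sit in the common instruction stream where the unwinder can account for them.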
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Supervisor Mode Access Prevention support
 *
 * Copyright (C) 2012 Intel Corporation
 * Author: H. Peter Anvin <hpa@linux.intel.com>
 */

#ifndef _ASM_X86_SMAP_H
#define _ASM_X86_SMAP_H

#include <asm/nops.h>
#include <asm/cpufeatures.h>

/* "Raw" instruction opcodes */
#define __ASM_CLAC	".byte 0x0f,0x01,0xca"
#define __ASM_STAC	".byte 0x0f,0x01,0xcb"

#ifdef __ASSEMBLY__

#include <asm/alternative-asm.h>

#ifdef CONFIG_X86_SMAP

#define ASM_CLAC \
	ALTERNATIVE "", __ASM_CLAC, X86_FEATURE_SMAP

#define ASM_STAC \
	ALTERNATIVE "", __ASM_STAC, X86_FEATURE_SMAP

#else /* CONFIG_X86_SMAP */

#define ASM_CLAC
#define ASM_STAC

#endif /* CONFIG_X86_SMAP */

#else /* __ASSEMBLY__ */
#include <asm/alternative.h>

#ifdef CONFIG_X86_SMAP

static __always_inline void clac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_CLAC, X86_FEATURE_SMAP);
}

static __always_inline void stac(void)
{
	/* Note: a barrier is implicit in alternative() */
	alternative("", __ASM_STAC, X86_FEATURE_SMAP);
}
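
/*
 * Only the conditional jump below is patched: the PUSHF/POP and
 * PUSH/POPF stay in the common instruction stream, keeping the stack
 * layout identical across both alternatives so the single ORC unwind
 * table stays valid (see the commit message above).
 */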
static __always_inline unsigned long smap_save(void)
{
	unsigned long flags;

	asm volatile ("# smap_save\n\t"
		      ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
		      "pushf; pop %0; " __ASM_CLAC "\n\t"
		      "1:"
		      : "=rm" (flags) : : "memory", "cc");

	return flags;
}

static __always_inline void smap_restore(unsigned long flags)
{
	asm volatile ("# smap_restore\n\t"
		      ALTERNATIVE("jmp 1f", "", X86_FEATURE_SMAP)
		      "push %0; popf\n\t"
		      "1:"
		      : : "g" (flags) : "memory", "cc");
}

/* These macros can be used in asm() statements */
#define ASM_CLAC \
	ALTERNATIVE("", __ASM_CLAC, X86_FEATURE_SMAP)
#define ASM_STAC \
	ALTERNATIVE("", __ASM_STAC, X86_FEATURE_SMAP)

#else /* CONFIG_X86_SMAP */

static inline void clac(void) { }
static inline void stac(void) { }

static inline unsigned long smap_save(void) { return 0; }
static inline void smap_restore(unsigned long flags) { }

#define ASM_CLAC
#define ASM_STAC

#endif /* CONFIG_X86_SMAP */

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_SMAP_H */
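
As the comment above the ASM_CLAC/ASM_STAC definitions in the C branch notes, those macros are meant for use inside asm() statements. A minimal sketch of the usual bracketing pattern follows; my_get_user is a hypothetical illustration and omits the exception-table plumbing a real uaccess helper needs:

	/*
	 * Open the user-access window (STAC), do the access, close the
	 * window again (CLAC). On CPUs without X86_FEATURE_SMAP both
	 * macros patch to nothing.
	 */
	#define my_get_user(dst, uptr)				\
		asm volatile(ASM_STAC "\n\t"			\
			     "mov %1, %0\n\t"			\
			     ASM_CLAC				\
			     : "=r" (dst)			\
			     : "m" (*(uptr)))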
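smap_save()/smap_restore() serve code that must temporarily close an open user-access window and later put EFLAGS.AC back exactly as it was; on x86 they are what the user_access_save()/user_access_restore() helpers map to. A minimal usage sketch (assuming a context where user access may currently be open):

	unsigned long flags;

	flags = smap_save();	/* save EFLAGS, then CLAC: window closed */
	/* ... code that must not run with user access enabled ... */
	smap_restore(flags);	/* POPF: reopen the window iff it was open */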