Merge tag 'x86_bugs_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 hw mitigation updates from Borislav Petkov:

 - A bunch of improvements, cleanups and fixlets to the SRSO mitigation
   machinery and other, general cleanups to the hw mitigations code, by
   Josh Poimboeuf

 - Improve the return thunk detection by objtool as it is absolutely
   important that the default return thunk is not used after returns
   have been patched. Future work to detect and report this better is
   pending

 - Other misc cleanups and fixes

* tag 'x86_bugs_for_6.7_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  x86/retpoline: Document some thunk handling aspects
  x86/retpoline: Make sure there are no unconverted return thunks due to KCSAN
  x86/callthunks: Delete unused "struct thunk_desc"
  x86/vdso: Run objtool on vdso32-setup.o
  objtool: Fix return thunk patching in retpolines
  x86/srso: Remove unnecessary semicolon
  x86/pti: Fix kernel warnings for pti= and nopti cmdline options
  x86/calldepth: Rename __x86_return_skl() to call_depth_return_thunk()
  x86/nospec: Refactor UNTRAIN_RET[_*]
  x86/rethunk: Use SYM_CODE_START[_LOCAL]_NOALIGN macros
  x86/srso: Disentangle rethunk-dependent options
  x86/srso: Move retbleed IBPB check into existing 'has_microcode' code block
  x86/bugs: Remove default case for fully switched enums
  x86/srso: Remove 'pred_cmd' label
  x86/srso: Unexport untraining functions
  x86/srso: Improve i-cache locality for alias mitigation
  x86/srso: Fix unret validation dependencies
  x86/srso: Fix vulnerability reporting for missing microcode
  x86/srso: Print mitigation for retbleed IBPB case
  x86/srso: Print actual mitigation if requested mitigation isn't possible
  ...
commit f84a52eef5
@@ -46,12 +46,22 @@ The possible values in this file are:
 
    The processor is not vulnerable
 
- * 'Vulnerable: no microcode':
+ * 'Vulnerable':
+
+   The processor is vulnerable and no mitigations have been applied.
+
+ * 'Vulnerable: No microcode':
 
    The processor is vulnerable, no microcode extending IBPB
    functionality to address the vulnerability has been applied.
 
- * 'Mitigation: microcode':
+ * 'Vulnerable: Safe RET, no microcode':
+
+   The "Safe RET" mitigation (see below) has been applied to protect the
+   kernel, but the IBPB-extending microcode has not been applied. User
+   space tasks may still be vulnerable.
+
+ * 'Vulnerable: Microcode, no safe RET':
 
    Extended IBPB functionality microcode patch has been applied. It does
    not address User->Kernel and Guest->Host transitions protection but it
@@ -72,11 +82,11 @@ The possible values in this file are:
 
    (spec_rstack_overflow=microcode)
 
- * 'Mitigation: safe RET':
+ * 'Mitigation: Safe RET':
 
-   Software-only mitigation. It complements the extended IBPB microcode
-   patch functionality by addressing User->Kernel and Guest->Host
-   transitions protection.
+   Combined microcode/software mitigation. It complements the
+   extended IBPB microcode patch functionality by addressing
+   User->Kernel and Guest->Host transitions protection.
 
    Selected by default or by spec_rstack_overflow=safe-ret
 
@@ -129,7 +139,7 @@ an indrect branch prediction barrier after having applied the required
 microcode patch for one's system. This mitigation comes also at
 a performance cost.
 
-Mitigation: safe RET
+Mitigation: Safe RET
 --------------------
 
 The mitigation works by ensuring all RET instructions speculate to
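The strings above are what the kernel exposes through sysfs for this vulnerability. As a quick standalone illustration (not part of this diff, and not kernel code), a user-space check can simply read the SRSO entry from the standard vulnerabilities directory:

#include <stdio.h>

int main(void)
{
        /* Standard sysfs location for the SRSO vulnerability state. */
        const char *path = "/sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow";
        char line[256];
        FILE *f = fopen(path, "r");

        if (!f) {
                perror(path);
                return 1;
        }
        if (fgets(line, sizeof(line), f))
                fputs(line, stdout);    /* e.g. "Mitigation: Safe RET" */
        fclose(f);
        return 0;
}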
@@ -42,7 +42,8 @@ vdso_img-$(VDSO64-y) += 64
 vdso_img-$(VDSOX32-y) += x32
 vdso_img-$(VDSO32-y) += 32
 
 obj-$(VDSO32-y) += vdso32-setup.o
+OBJECT_FILES_NON_STANDARD_vdso32-setup.o := n
 
 vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
 vobjs32 := $(foreach F,$(vobjs32-y),$(obj)/$F)
@@ -271,7 +271,7 @@
 .Lskip_rsb_\@:
 .endm
 
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
 #define CALL_UNTRAIN_RET "call entry_untrain_ret"
 #else
 #define CALL_UNTRAIN_RET ""
@@ -288,38 +288,24 @@
  * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
  * where we have a stack but before any RET instruction.
  */
-.macro UNTRAIN_RET
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
+.macro __UNTRAIN_RET ibpb_feature, call_depth_insns
+#if defined(CONFIG_RETHUNK) || defined(CONFIG_CPU_IBPB_ENTRY)
        VALIDATE_UNRET_END
        ALTERNATIVE_3 "", \
                      CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
-                     "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
-                     __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
+                     "call entry_ibpb", \ibpb_feature, \
+                     __stringify(\call_depth_insns), X86_FEATURE_CALL_DEPTH
 #endif
 .endm
 
-.macro UNTRAIN_RET_VM
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-        defined(CONFIG_CALL_DEPTH_TRACKING) || defined(CONFIG_CPU_SRSO)
-       VALIDATE_UNRET_END
-       ALTERNATIVE_3 "", \
-                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
-                     "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT, \
-                     __stringify(RESET_CALL_DEPTH), X86_FEATURE_CALL_DEPTH
-#endif
-.endm
+#define UNTRAIN_RET \
+       __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH)
 
-.macro UNTRAIN_RET_FROM_CALL
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
-        defined(CONFIG_CALL_DEPTH_TRACKING)
-       VALIDATE_UNRET_END
-       ALTERNATIVE_3 "", \
-                     CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \
-                     "call entry_ibpb", X86_FEATURE_ENTRY_IBPB, \
-                     __stringify(RESET_CALL_DEPTH_FROM_CALL), X86_FEATURE_CALL_DEPTH
-#endif
-.endm
+#define UNTRAIN_RET_VM \
+       __UNTRAIN_RET X86_FEATURE_IBPB_ON_VMEXIT, __stringify(RESET_CALL_DEPTH)
+
+#define UNTRAIN_RET_FROM_CALL \
+       __UNTRAIN_RET X86_FEATURE_ENTRY_IBPB, __stringify(RESET_CALL_DEPTH_FROM_CALL)
 
 
 .macro CALL_DEPTH_ACCOUNT
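The refactor above collapses three near-identical UNTRAIN_RET variants into one parameterized __UNTRAIN_RET helper that takes the IBPB feature flag and the call-depth reset sequence as arguments. A standalone C sketch of the same deduplication pattern (all names here are illustrative stand-ins, not the kernel's assembly macros):

#include <stdio.h>

/* Hypothetical stand-ins for the feature-specific actions. */
static void ibpb_on_entry(void)               { puts("entry IBPB"); }
static void ibpb_on_vmexit(void)              { puts("VMEXIT IBPB"); }
static void reset_call_depth(void)            { puts("reset call depth"); }
static void reset_call_depth_from_call(void)  { puts("reset call depth (from call)"); }

/* One parameterized helper replaces three near-identical macros. */
#define __UNTRAIN_RET(ibpb_action, call_depth_action) \
        do { ibpb_action(); call_depth_action(); } while (0)

#define UNTRAIN_RET()           __UNTRAIN_RET(ibpb_on_entry,  reset_call_depth)
#define UNTRAIN_RET_VM()        __UNTRAIN_RET(ibpb_on_vmexit, reset_call_depth)
#define UNTRAIN_RET_FROM_CALL() __UNTRAIN_RET(ibpb_on_entry,  reset_call_depth_from_call)

int main(void)
{
        UNTRAIN_RET();
        UNTRAIN_RET_VM();
        UNTRAIN_RET_FROM_CALL();
        return 0;
}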
@@ -348,26 +334,31 @@ extern void __x86_return_thunk(void);
 static inline void __x86_return_thunk(void) {}
 #endif
 
-extern void retbleed_return_thunk(void);
-extern void srso_return_thunk(void);
-extern void srso_alias_return_thunk(void);
+#ifdef CONFIG_CPU_UNRET_ENTRY
+extern void retbleed_return_thunk(void);
+#else
+static inline void retbleed_return_thunk(void) {}
+#endif
+
+#ifdef CONFIG_CPU_SRSO
+extern void srso_return_thunk(void);
+extern void srso_alias_return_thunk(void);
+#else
+static inline void srso_return_thunk(void) {}
+static inline void srso_alias_return_thunk(void) {}
+#endif
 
 extern void retbleed_untrain_ret(void);
 extern void srso_untrain_ret(void);
 extern void srso_alias_untrain_ret(void);
 
 extern void entry_untrain_ret(void);
 extern void entry_ibpb(void);
 
 extern void (*x86_return_thunk)(void);
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
-extern void __x86_return_skl(void);
-
-static inline void x86_set_skl_return_thunk(void)
-{
-       x86_return_thunk = &__x86_return_skl;
-}
+extern void call_depth_return_thunk(void);
 
 #define CALL_DEPTH_ACCOUNT \
        ALTERNATIVE("", \
@@ -380,12 +371,12 @@ DECLARE_PER_CPU(u64, __x86_ret_count);
 DECLARE_PER_CPU(u64, __x86_stuffs_count);
 DECLARE_PER_CPU(u64, __x86_ctxsw_count);
 #endif
-#else
-static inline void x86_set_skl_return_thunk(void) {}
+#else /* !CONFIG_CALL_DEPTH_TRACKING */
+
+static inline void call_depth_return_thunk(void) {}
 #define CALL_DEPTH_ACCOUNT ""
 
-#endif
+#endif /* CONFIG_CALL_DEPTH_TRACKING */
 
 #ifdef CONFIG_RETPOLINE
 
@@ -48,11 +48,6 @@ EXPORT_SYMBOL_GPL(__x86_call_count);
 
 extern s32 __call_sites[], __call_sites_end[];
 
-struct thunk_desc {
-       void *template;
-       unsigned int template_size;
-};
-
 struct core_text {
        unsigned long base;
        unsigned long end;
@@ -63,7 +63,7 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd);
 
 static DEFINE_MUTEX(spec_ctrl_mutex);
 
-void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;
+void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
 
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
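The one-character change above (dropping the '&') is purely cosmetic: in C, a function name used as a value already decays to a pointer to that function, so both spellings produce the same pointer. A minimal demonstration:

#include <stdio.h>

static void thunk(void) { }

/* 'thunk' and '&thunk' yield the same function pointer value. */
static void (*return_thunk)(void) = thunk;

int main(void)
{
        printf("%d\n", return_thunk == &thunk);   /* prints 1 */
        return 0;
}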
@@ -717,7 +717,7 @@ void update_gds_msr(void)
        case GDS_MITIGATION_UCODE_NEEDED:
        case GDS_MITIGATION_HYPERVISOR:
                return;
-       };
+       }
 
        wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
 
@@ -1019,7 +1019,6 @@ static void __init retbleed_select_mitigation(void)
 
 do_cmd_auto:
        case RETBLEED_CMD_AUTO:
-       default:
                if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
                    boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                        if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
@@ -1042,8 +1041,7 @@ do_cmd_auto:
                setup_force_cpu_cap(X86_FEATURE_RETHUNK);
                setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-               if (IS_ENABLED(CONFIG_RETHUNK))
-                       x86_return_thunk = retbleed_return_thunk;
+               x86_return_thunk = retbleed_return_thunk;
 
                if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
                    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1061,7 +1059,8 @@ do_cmd_auto:
        case RETBLEED_MITIGATION_STUFF:
                setup_force_cpu_cap(X86_FEATURE_RETHUNK);
                setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
-               x86_set_skl_return_thunk();
+
+               x86_return_thunk = call_depth_return_thunk;
                break;
 
        default:
@@ -1290,6 +1289,8 @@ spectre_v2_user_select_mitigation(void)
 
        spectre_v2_user_ibpb = mode;
        switch (cmd) {
+       case SPECTRE_V2_USER_CMD_NONE:
+               break;
        case SPECTRE_V2_USER_CMD_FORCE:
        case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
        case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
@@ -1301,8 +1302,6 @@ spectre_v2_user_select_mitigation(void)
        case SPECTRE_V2_USER_CMD_SECCOMP:
                static_branch_enable(&switch_mm_cond_ibpb);
                break;
-       default:
-               break;
        }
 
        pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
@@ -2160,6 +2159,10 @@ static int l1d_flush_prctl_get(struct task_struct *task)
 static int ssb_prctl_get(struct task_struct *task)
 {
        switch (ssb_mode) {
+       case SPEC_STORE_BYPASS_NONE:
+               if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
+                       return PR_SPEC_ENABLE;
+               return PR_SPEC_NOT_AFFECTED;
        case SPEC_STORE_BYPASS_DISABLE:
                return PR_SPEC_DISABLE;
        case SPEC_STORE_BYPASS_SECCOMP:
@@ -2171,11 +2174,8 @@ static int ssb_prctl_get(struct task_struct *task)
                if (task_spec_ssb_disable(task))
                        return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
                return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
-       default:
-               if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
-                       return PR_SPEC_ENABLE;
-               return PR_SPEC_NOT_AFFECTED;
        }
+       BUG();
 }
 
 static int ib_prctl_get(struct task_struct *task)
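Replacing the default: arm with an explicit SPEC_STORE_BYPASS_NONE case (and a BUG() after the switch) means every enumerator is now named, so the compiler can flag a newly added mode that is not handled. A reduced standalone example of that property, assuming a build with -Wswitch (enabled by -Wall); the enum and strings are invented for illustration:

#include <stdio.h>

enum ssb_mode { SSB_NONE, SSB_DISABLE, SSB_SECCOMP, SSB_PRCTL };

static const char *describe(enum ssb_mode m)
{
        /*
         * No default: arm. With -Wall, adding a new enumerator without
         * listing it here triggers a -Wswitch warning at compile time.
         */
        switch (m) {
        case SSB_NONE:    return "not affected";
        case SSB_DISABLE: return "forced disable";
        case SSB_SECCOMP: return "seccomp";
        case SSB_PRCTL:   return "prctl";
        }
        return "unreachable";   /* out-of-range values only */
}

int main(void)
{
        printf("%s\n", describe(SSB_PRCTL));
        return 0;
}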
@@ -2353,6 +2353,8 @@ early_param("l1tf", l1tf_cmdline);
 
 enum srso_mitigation {
        SRSO_MITIGATION_NONE,
+       SRSO_MITIGATION_UCODE_NEEDED,
+       SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED,
        SRSO_MITIGATION_MICROCODE,
        SRSO_MITIGATION_SAFE_RET,
        SRSO_MITIGATION_IBPB,
@@ -2368,11 +2370,13 @@ enum srso_mitigation_cmd {
 };
 
 static const char * const srso_strings[] = {
-       [SRSO_MITIGATION_NONE]           = "Vulnerable",
-       [SRSO_MITIGATION_MICROCODE]      = "Mitigation: microcode",
-       [SRSO_MITIGATION_SAFE_RET]       = "Mitigation: safe RET",
-       [SRSO_MITIGATION_IBPB]           = "Mitigation: IBPB",
-       [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
+       [SRSO_MITIGATION_NONE]                  = "Vulnerable",
+       [SRSO_MITIGATION_UCODE_NEEDED]          = "Vulnerable: No microcode",
+       [SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED] = "Vulnerable: Safe RET, no microcode",
+       [SRSO_MITIGATION_MICROCODE]             = "Vulnerable: Microcode, no safe RET",
+       [SRSO_MITIGATION_SAFE_RET]              = "Mitigation: Safe RET",
+       [SRSO_MITIGATION_IBPB]                  = "Mitigation: IBPB",
+       [SRSO_MITIGATION_IBPB_ON_VMEXIT]        = "Mitigation: IBPB on VMEXIT only"
 };
 
 static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
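The reworked srso_strings[] keeps exactly one human-readable string per mitigation state, using designated initializers so the mapping stays correct even if the enumerators are reordered. A reduced standalone version of the same table pattern (enum and entries shortened for illustration):

#include <stdio.h>

enum srso_state {
        STATE_NONE,
        STATE_UCODE_NEEDED,
        STATE_SAFE_RET,
};

/* Designated initializers tie each string to its enumerator, not to its position. */
static const char * const state_strings[] = {
        [STATE_NONE]         = "Vulnerable",
        [STATE_UCODE_NEEDED] = "Vulnerable: No microcode",
        [STATE_SAFE_RET]     = "Mitigation: Safe RET",
};

int main(void)
{
        enum srso_state s = STATE_SAFE_RET;
        printf("%s\n", state_strings[s]);
        return 0;
}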
@@ -2406,34 +2410,44 @@ static void __init srso_select_mitigation(void)
 {
        bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);
 
-       if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off())
-               goto pred_cmd;
+       if (cpu_mitigations_off())
+               return;
 
-       if (!has_microcode) {
-               pr_warn("IBPB-extending microcode not applied!\n");
-               pr_warn(SRSO_NOTICE);
-       } else {
+       if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
+               if (boot_cpu_has(X86_FEATURE_SBPB))
+                       x86_pred_cmd = PRED_CMD_SBPB;
+               return;
+       }
+
+       if (has_microcode) {
                /*
                 * Zen1/2 with SMT off aren't vulnerable after the right
                 * IBPB microcode has been applied.
+                *
+                * Zen1/2 don't have SBPB, no need to try to enable it here.
                 */
                if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
                        setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
                        return;
                }
-       }
 
-       if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
-               if (has_microcode) {
-                       pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n");
+               if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
                        srso_mitigation = SRSO_MITIGATION_IBPB;
-                       goto pred_cmd;
+                       goto out;
                }
+       } else {
+               pr_warn("IBPB-extending microcode not applied!\n");
+               pr_warn(SRSO_NOTICE);
+
+               /* may be overwritten by SRSO_CMD_SAFE_RET below */
+               srso_mitigation = SRSO_MITIGATION_UCODE_NEEDED;
        }
 
        switch (srso_cmd) {
        case SRSO_CMD_OFF:
-               goto pred_cmd;
+               if (boot_cpu_has(X86_FEATURE_SBPB))
+                       x86_pred_cmd = PRED_CMD_SBPB;
+               return;
 
        case SRSO_CMD_MICROCODE:
                if (has_microcode) {
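The restructured srso_select_mitigation() sorts out "mitigations off", "not affected", "has microcode" and "no microcode" up front, before looking at the requested command-line option. A much-simplified standalone model of that decision order, with stub predicates standing in for the real CPU and feature checks (purely illustrative, not kernel code):

#include <stdbool.h>
#include <stdio.h>

enum srso_state {
        SRSO_NONE,
        SRSO_UCODE_NEEDED,
        SRSO_SAFE_RET,
        SRSO_SAFE_RET_UCODE_NEEDED,
        SRSO_IBPB,
};

/* Stub predicates standing in for the real checks. */
static bool mitigations_off(void)    { return false; }
static bool cpu_affected(void)       { return true;  }
static bool has_ibpb_microcode(void) { return false; }
static bool retbleed_uses_ibpb(void) { return false; }
static bool want_safe_ret(void)      { return true;  }   /* default choice */

static enum srso_state select_srso(void)
{
        enum srso_state state = SRSO_NONE;

        if (mitigations_off() || !cpu_affected())
                return SRSO_NONE;

        if (has_ibpb_microcode()) {
                /* Reuse the retbleed IBPB mitigation when it is already active. */
                if (retbleed_uses_ibpb())
                        return SRSO_IBPB;
        } else {
                /* Provisional; may be upgraded by the Safe RET selection below. */
                state = SRSO_UCODE_NEEDED;
        }

        if (want_safe_ret())
                state = has_ibpb_microcode() ? SRSO_SAFE_RET
                                             : SRSO_SAFE_RET_UCODE_NEEDED;
        return state;
}

int main(void)
{
        printf("selected state: %d\n", select_srso());
        return 0;
}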
@@ -2458,10 +2472,12 @@ static void __init srso_select_mitigation(void)
                        setup_force_cpu_cap(X86_FEATURE_SRSO);
                        x86_return_thunk = srso_return_thunk;
                }
-               srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+               if (has_microcode)
+                       srso_mitigation = SRSO_MITIGATION_SAFE_RET;
+               else
+                       srso_mitigation = SRSO_MITIGATION_SAFE_RET_UCODE_NEEDED;
        } else {
                pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-               goto pred_cmd;
        }
        break;
 
@@ -2473,7 +2489,6 @@
                }
        } else {
                pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
-               goto pred_cmd;
        }
        break;
 
@@ -2485,20 +2500,12 @@
                }
        } else {
                pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
-               goto pred_cmd;
                }
                break;
-
-       default:
-               break;
        }
 
-       pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode"));
-
-pred_cmd:
-       if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) &&
-            boot_cpu_has(X86_FEATURE_SBPB))
-               x86_pred_cmd = PRED_CMD_SBPB;
+out:
+       pr_info("%s\n", srso_strings[srso_mitigation]);
 }
 
 #undef pr_fmt
@@ -2704,9 +2711,7 @@ static ssize_t srso_show_state(char *buf)
        if (boot_cpu_has(X86_FEATURE_SRSO_NO))
                return sysfs_emit(buf, "Mitigation: SMT disabled\n");
 
-       return sysfs_emit(buf, "%s%s\n",
-                         srso_strings[srso_mitigation],
-                         boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) ? "" : ", no microcode");
+       return sysfs_emit(buf, "%s\n", srso_strings[srso_mitigation]);
 }
 
 static ssize_t gds_show_state(char *buf)
@@ -139,10 +139,7 @@ SECTIONS
                STATIC_CALL_TEXT
 
                ALIGN_ENTRY_TEXT_BEGIN
-#ifdef CONFIG_CPU_SRSO
                *(.text..__x86.rethunk_untrain)
-#endif
-
                ENTRY_TEXT
 
 #ifdef CONFIG_CPU_SRSO
@@ -520,12 +517,12 @@ INIT_PER_CPU(irq_stack_backing_store);
           "fixed_percpu_data is not at start of per-cpu area");
 #endif
 
-#ifdef CONFIG_RETHUNK
+#ifdef CONFIG_CPU_UNRET_ENTRY
 . = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned");
-. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 #endif
 
 #ifdef CONFIG_CPU_SRSO
+. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned");
 /*
  * GNU ld cannot do XOR until 2.41.
  * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1
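The context lines above reference the GNU ld XOR limitation: the SRSO alias pair is meant to differ only in a few low address bits (2, 8, 14 and 20 in the kernel's layout), and when XOR is not available that difference can be expressed with OR/AND/subtract, since (A | B) - (A & B) equals A ^ B for any A and B. A quick standalone check of that identity (addresses below are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Identity usable when XOR itself is unavailable: (a | b) - (a & b) == a ^ b. */
static int identity_holds(uint64_t a, uint64_t b)
{
        return ((a | b) - (a & b)) == (a ^ b);
}

int main(void)
{
        uint64_t untrain = 0xffffffff82000000ull;   /* made-up 2M-aligned address */
        uint64_t safe    = untrain | (1u << 2) | (1u << 8) | (1u << 14) | (1u << 20);

        printf("identity holds: %d\n", identity_holds(untrain, safe));
        printf("alias bits:     %#llx\n",
               (unsigned long long)((untrain | safe) - (untrain & safe)));
        return 0;
}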
@@ -126,12 +126,20 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 #endif
-/*
- * This function name is magical and is used by -mfunction-return=thunk-extern
- * for the compiler to generate JMPs to it.
- */
+
 #ifdef CONFIG_RETHUNK
 
+/*
+ * Be careful here: that label cannot really be removed because in
+ * some configurations and toolchains, the JMP __x86_return_thunk the
+ * compiler issues is either a short one or the compiler doesn't use
+ * relocations for same-section JMPs and that breaks the returns
+ * detection logic in apply_returns() and in objtool.
+ */
+       .section .text..__x86.return_thunk
+
+#ifdef CONFIG_CPU_SRSO
+
 /*
  * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
  * special addresses:
@@ -147,29 +155,18 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
  *
  * As a result, srso_alias_safe_ret() becomes a safe return.
  */
-#ifdef CONFIG_CPU_SRSO
-       .section .text..__x86.rethunk_untrain
-
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       .pushsection .text..__x86.rethunk_untrain
+SYM_CODE_START_NOALIGN(srso_alias_untrain_ret)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        ASM_NOP2
        lfence
        jmp srso_alias_return_thunk
 SYM_FUNC_END(srso_alias_untrain_ret)
-__EXPORT_THUNK(srso_alias_untrain_ret)
+       .popsection
 
-       .section .text..__x86.rethunk_safe
-#else
-/* dummy definition for alternatives */
-SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
-       ANNOTATE_UNRET_SAFE
-       ret
-       int3
-SYM_FUNC_END(srso_alias_untrain_ret)
-#endif
-
-SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
+       .pushsection .text..__x86.rethunk_safe
+SYM_CODE_START_NOALIGN(srso_alias_safe_ret)
        lea 8(%_ASM_SP), %_ASM_SP
        UNWIND_HINT_FUNC
        ANNOTATE_UNRET_SAFE
@@ -177,14 +174,63 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE)
        int3
 SYM_FUNC_END(srso_alias_safe_ret)
 
-       .section .text..__x86.return_thunk
-
-SYM_CODE_START(srso_alias_return_thunk)
+SYM_CODE_START_NOALIGN(srso_alias_return_thunk)
        UNWIND_HINT_FUNC
        ANNOTATE_NOENDBR
        call srso_alias_safe_ret
        ud2
 SYM_CODE_END(srso_alias_return_thunk)
+       .popsection
+
+/*
+ * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
+ * above. On kernel entry, srso_untrain_ret() is executed which is a
+ *
+ * movabs $0xccccc30824648d48,%rax
+ *
+ * and when the return thunk executes the inner label srso_safe_ret()
+ * later, it is a stack manipulation and a RET which is mispredicted and
+ * thus a "safe" one to use.
+ */
+       .align 64
+       .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
+SYM_CODE_START_LOCAL_NOALIGN(srso_untrain_ret)
+       ANNOTATE_NOENDBR
+       .byte 0x48, 0xb8
+
+/*
+ * This forces the function return instruction to speculate into a trap
+ * (UD2 in srso_return_thunk() below). This RET will then mispredict
+ * and execution will continue at the return site read from the top of
+ * the stack.
+ */
+SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
+       lea 8(%_ASM_SP), %_ASM_SP
+       ret
+       int3
+       int3
+       /* end of movabs */
+       lfence
+       call srso_safe_ret
+       ud2
+SYM_CODE_END(srso_safe_ret)
+SYM_FUNC_END(srso_untrain_ret)
+
+SYM_CODE_START(srso_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       call srso_safe_ret
+       ud2
+SYM_CODE_END(srso_return_thunk)
+
+#define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "jmp srso_alias_untrain_ret"
+#else /* !CONFIG_CPU_SRSO */
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#define JMP_SRSO_ALIAS_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_SRSO */
+
+#ifdef CONFIG_CPU_UNRET_ENTRY
+
 /*
  * Some generic notes on the untraining sequences:
@@ -216,7 +262,7 @@ SYM_CODE_END(srso_alias_return_thunk)
  */
        .align 64
        .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc
-SYM_START(retbleed_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
+SYM_CODE_START_LOCAL_NOALIGN(retbleed_untrain_ret)
        ANNOTATE_NOENDBR
        /*
         * As executed from retbleed_untrain_ret, this is:
@@ -264,72 +310,27 @@ SYM_CODE_END(retbleed_return_thunk)
        jmp retbleed_return_thunk
        int3
 SYM_FUNC_END(retbleed_untrain_ret)
-__EXPORT_THUNK(retbleed_untrain_ret)
 
-/*
- * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret()
- * above. On kernel entry, srso_untrain_ret() is executed which is a
- *
- * movabs $0xccccc30824648d48,%rax
- *
- * and when the return thunk executes the inner label srso_safe_ret()
- * later, it is a stack manipulation and a RET which is mispredicted and
- * thus a "safe" one to use.
- */
-       .align 64
-       .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc
-SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE)
-       ANNOTATE_NOENDBR
-       .byte 0x48, 0xb8
+#define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
+#else /* !CONFIG_CPU_UNRET_ENTRY */
+#define JMP_RETBLEED_UNTRAIN_RET "ud2"
+#endif /* CONFIG_CPU_UNRET_ENTRY */
 
-/*
- * This forces the function return instruction to speculate into a trap
- * (UD2 in srso_return_thunk() below). This RET will then mispredict
- * and execution will continue at the return site read from the top of
- * the stack.
- */
-SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL)
-       lea 8(%_ASM_SP), %_ASM_SP
-       ret
-       int3
-       int3
-       /* end of movabs */
-       lfence
-       call srso_safe_ret
-       ud2
-SYM_CODE_END(srso_safe_ret)
-SYM_FUNC_END(srso_untrain_ret)
-__EXPORT_THUNK(srso_untrain_ret)
-
-SYM_CODE_START(srso_return_thunk)
-       UNWIND_HINT_FUNC
-       ANNOTATE_NOENDBR
-       call srso_safe_ret
-       ud2
-SYM_CODE_END(srso_return_thunk)
+#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
 
 SYM_FUNC_START(entry_untrain_ret)
-       ALTERNATIVE_2 "jmp retbleed_untrain_ret", \
-                     "jmp srso_untrain_ret", X86_FEATURE_SRSO, \
-                     "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
+       ALTERNATIVE_2 JMP_RETBLEED_UNTRAIN_RET, \
+                     JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO, \
+                     JMP_SRSO_ALIAS_UNTRAIN_RET, X86_FEATURE_SRSO_ALIAS
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
 
-SYM_CODE_START(__x86_return_thunk)
-       UNWIND_HINT_FUNC
-       ANNOTATE_NOENDBR
-       ANNOTATE_UNRET_SAFE
-       ret
-       int3
-SYM_CODE_END(__x86_return_thunk)
-EXPORT_SYMBOL(__x86_return_thunk)
-
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
 
 #ifdef CONFIG_CALL_DEPTH_TRACKING
 
        .align 64
-SYM_FUNC_START(__x86_return_skl)
+SYM_FUNC_START(call_depth_return_thunk)
        ANNOTATE_NOENDBR
        /*
         * Keep the hotpath in a 16byte I-fetch for the non-debug
@@ -356,6 +357,33 @@ SYM_FUNC_START(__x86_return_skl)
        ANNOTATE_UNRET_SAFE
        ret
        int3
-SYM_FUNC_END(__x86_return_skl)
+SYM_FUNC_END(call_depth_return_thunk)
 
 #endif /* CONFIG_CALL_DEPTH_TRACKING */
+
+/*
+ * This function name is magical and is used by -mfunction-return=thunk-extern
+ * for the compiler to generate JMPs to it.
+ *
+ * This code is only used during kernel boot or module init. All
+ * 'JMP __x86_return_thunk' sites are changed to something else by
+ * apply_returns().
+ *
+ * This should be converted eventually to call a warning function which
+ * should scream loudly when the default return thunk is called after
+ * alternatives have been applied.
+ *
+ * That warning function cannot BUG() because the bug splat cannot be
+ * displayed in all possible configurations, leading to users not really
+ * knowing why the machine froze.
+ */
+SYM_CODE_START(__x86_return_thunk)
+       UNWIND_HINT_FUNC
+       ANNOTATE_NOENDBR
+       ANNOTATE_UNRET_SAFE
+       ret
+       int3
+SYM_CODE_END(__x86_return_thunk)
+EXPORT_SYMBOL(__x86_return_thunk)
+
+#endif /* CONFIG_RETHUNK */
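The extended comment added here spells out a follow-up idea: the default __x86_return_thunk should eventually warn loudly, without BUG()ing, if it is ever executed after alternatives have been applied. A rough user-space sketch of that "warn once after patching" notion, purely illustrative and not the kernel's implementation:

#include <stdbool.h>
#include <stdio.h>

static bool returns_patched;    /* would be set once return sites are rewritten */
static bool warned;

/*
 * Stand-in for the default return thunk: legitimate before patching,
 * a loud but non-fatal warning afterwards.
 */
static void default_return_thunk(void)
{
        if (returns_patched && !warned) {
                warned = true;
                fprintf(stderr, "WARNING: default return thunk used after patching\n");
        }
}

int main(void)
{
        default_return_thunk();         /* fine: boot/module-init time */
        returns_patched = true;
        default_return_thunk();         /* triggers the one-time warning */
        return 0;
}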
@@ -69,6 +69,7 @@ static void __init pti_print_if_secure(const char *reason)
        pr_info("%s\n", reason);
 }
 
+/* Assume mode is auto unless overridden via cmdline below. */
 static enum pti_mode {
        PTI_AUTO = 0,
        PTI_FORCE_OFF,
@@ -77,50 +78,49 @@
 
 void __init pti_check_boottime_disable(void)
 {
-       char arg[5];
-       int ret;
-
-       /* Assume mode is auto unless overridden. */
-       pti_mode = PTI_AUTO;
-
        if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
                pti_mode = PTI_FORCE_OFF;
                pti_print_if_insecure("disabled on XEN PV.");
                return;
        }
 
-       ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
-       if (ret > 0) {
-               if (ret == 3 && !strncmp(arg, "off", 3)) {
-                       pti_mode = PTI_FORCE_OFF;
-                       pti_print_if_insecure("disabled on command line.");
-                       return;
-               }
-               if (ret == 2 && !strncmp(arg, "on", 2)) {
-                       pti_mode = PTI_FORCE_ON;
-                       pti_print_if_secure("force enabled on command line.");
-                       goto enable;
-               }
-               if (ret == 4 && !strncmp(arg, "auto", 4)) {
-                       pti_mode = PTI_AUTO;
-                       goto autosel;
-               }
-       }
-
-       if (cmdline_find_option_bool(boot_command_line, "nopti") ||
-           cpu_mitigations_off()) {
+       if (cpu_mitigations_off())
                pti_mode = PTI_FORCE_OFF;
+       if (pti_mode == PTI_FORCE_OFF) {
                pti_print_if_insecure("disabled on command line.");
                return;
        }
 
-autosel:
-       if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
+       if (pti_mode == PTI_FORCE_ON)
+               pti_print_if_secure("force enabled on command line.");
+
+       if (pti_mode == PTI_AUTO && !boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
                return;
-enable:
+
        setup_force_cpu_cap(X86_FEATURE_PTI);
 }
 
+static int __init pti_parse_cmdline(char *arg)
+{
+       if (!strcmp(arg, "off"))
+               pti_mode = PTI_FORCE_OFF;
+       else if (!strcmp(arg, "on"))
+               pti_mode = PTI_FORCE_ON;
+       else if (!strcmp(arg, "auto"))
+               pti_mode = PTI_AUTO;
+       else
+               return -EINVAL;
+       return 0;
+}
+early_param("pti", pti_parse_cmdline);
+
+static int __init pti_parse_cmdline_nopti(char *arg)
+{
+       pti_mode = PTI_FORCE_OFF;
+       return 0;
+}
+early_param("nopti", pti_parse_cmdline_nopti);
+
 pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
 {
        /*
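The rework registers early_param() handlers for pti= and nopti instead of scanning the raw command line inside pti_check_boottime_disable(). As a user-space analogue of the same tri-state parsing (not kernel code; names and the option syntax handling are invented for illustration):

#include <stdio.h>
#include <string.h>

enum pti_mode { PTI_AUTO, PTI_FORCE_OFF, PTI_FORCE_ON };
static enum pti_mode pti_mode = PTI_AUTO;

/* Mirrors the shape of the new pti= handler: map a value string to a mode. */
static int parse_pti(const char *arg)
{
        if (!strcmp(arg, "off"))
                pti_mode = PTI_FORCE_OFF;
        else if (!strcmp(arg, "on"))
                pti_mode = PTI_FORCE_ON;
        else if (!strcmp(arg, "auto"))
                pti_mode = PTI_AUTO;
        else
                return -1;
        return 0;
}

int main(int argc, char **argv)
{
        for (int i = 1; i < argc; i++) {
                if (!strncmp(argv[i], "pti=", 4))
                        parse_pti(argv[i] + 4);
                else if (!strcmp(argv[i], "nopti"))
                        pti_mode = PTI_FORCE_OFF;   /* nopti is a plain flag */
        }
        printf("pti_mode = %d\n", pti_mode);
        return 0;
}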
@@ -130,7 +130,8 @@
  * it will be ignored.
  */
 .macro VALIDATE_UNRET_BEGIN
-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+       (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
 .Lhere_\@:
        .pushsection .discard.validate_unret
        .long .Lhere_\@ - .
@@ -60,4 +60,5 @@ include/generated/utsversion.h: FORCE
 $(obj)/version-timestamp.o: include/generated/utsversion.h
 CFLAGS_version-timestamp.o := -include include/generated/utsversion.h
 KASAN_SANITIZE_version-timestamp.o := n
+KCSAN_SANITIZE_version-timestamp.o := n
 GCOV_PROFILE_version-timestamp.o := n
@@ -19,6 +19,7 @@ quiet_cmd_cc_o_c = CC $@
 
 ifdef CONFIG_MODULES
 KASAN_SANITIZE_.vmlinux.export.o := n
+KCSAN_SANITIZE_.vmlinux.export.o := n
 GCOV_PROFILE_.vmlinux.export.o := n
 targets += .vmlinux.export.o
 vmlinux: .vmlinux.export.o
@@ -37,7 +37,8 @@ objtool-enabled := $(or $(delay-objtool),$(CONFIG_NOINSTR_VALIDATION))
 
 vmlinux-objtool-args-$(delay-objtool) += $(objtool-args-y)
 vmlinux-objtool-args-$(CONFIG_GCOV_KERNEL) += --no-unreachable
-vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr $(if $(CONFIG_CPU_UNRET_ENTRY), --unret)
+vmlinux-objtool-args-$(CONFIG_NOINSTR_VALIDATION) += --noinstr \
+                                                     $(if $(or $(CONFIG_CPU_UNRET_ENTRY),$(CONFIG_CPU_SRSO)), --unret)
 
 objtool-args = $(vmlinux-objtool-args-y) --link
 
@@ -1610,6 +1610,22 @@ static int add_jump_destinations(struct objtool_file *file)
                        return -1;
                }
 
+               /*
+                * An intra-TU jump in retpoline.o might not have a relocation
+                * for its jump dest, in which case the above
+                * add_{retpoline,return}_call() didn't happen.
+                */
+               if (jump_dest->sym && jump_dest->offset == jump_dest->sym->offset) {
+                       if (jump_dest->sym->retpoline_thunk) {
+                               add_retpoline_call(file, insn);
+                               continue;
+                       }
+                       if (jump_dest->sym->return_thunk) {
+                               add_return_call(file, insn, true);
+                               continue;
+                       }
+               }
+
                /*
                 * Cross-function jump.
                 */
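The new objtool logic classifies an intra-TU jump whose destination is the first instruction of a retpoline or return thunk as a retpoline/return call even when no relocation is present. A reduced sketch of that classification step, using simplified stand-in types rather than objtool's real structures:

#include <stdbool.h>
#include <stdio.h>

struct symbol {
        const char *name;
        unsigned long offset;
        bool retpoline_thunk;
        bool return_thunk;
};

struct insn_dest {
        struct symbol *sym;
        unsigned long offset;
};

/* Returns a label describing how the jump should be recorded. */
static const char *classify_jump(const struct insn_dest *dest)
{
        if (dest->sym && dest->offset == dest->sym->offset) {
                if (dest->sym->retpoline_thunk)
                        return "retpoline call";
                if (dest->sym->return_thunk)
                        return "return call";
        }
        return "plain jump";
}

int main(void)
{
        struct symbol thunk = { "__x86_return_thunk", 0x1000, false, true };
        struct insn_dest dest = { &thunk, 0x1000 };

        printf("%s\n", classify_jump(&dest));   /* prints "return call" */
        return 0;
}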