Merge tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Make sure a kdump kernel with CONFIG_IMA_KEXEC enabled and booted on
   AMD SME-enabled hardware properly decrypts the ima_kexec buffer
   information passed to it from the previous kernel

 - Fix building the kernel with Clang where a non-TLS definition of the
   stack protector guard cookie leads to bogus code generation

 - Clear a wrongly advertised virtualized VMLOAD/VMSAVE feature flag on
   some Zen4 client systems, as those instructions are not supported on
   client parts

* tag 'x86_urgent_for_v6.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mm: Fix a kdump kernel failure on SME system when CONFIG_IMA_KEXEC=y
  x86/stackprotector: Work around strict Clang TLS symbol requirements
  x86/CPU/AMD: Clear virtualized VMLOAD/VMSAVE on Zen4 client
commit f66d6acccb
Author: Linus Torvalds
Date:   2024-11-17 09:35:51 -08:00

7 changed files with 42 additions and 4 deletions

arch/x86/Makefile

@@ -142,9 +142,10 @@ ifeq ($(CONFIG_X86_32),y)
 
         ifeq ($(CONFIG_STACKPROTECTOR),y)
                 ifeq ($(CONFIG_SMP),y)
-                        KBUILD_CFLAGS += -mstack-protector-guard-reg=fs -mstack-protector-guard-symbol=__stack_chk_guard
+                        KBUILD_CFLAGS += -mstack-protector-guard-reg=fs \
+                                         -mstack-protector-guard-symbol=__ref_stack_chk_guard
                 else
-                        KBUILD_CFLAGS += -mstack-protector-guard=global
+                        KBUILD_CFLAGS += -mstack-protector-guard=global
                 endif
         endif
 else

arch/x86/entry/entry.S

@@ -51,3 +51,19 @@ EXPORT_SYMBOL_GPL(mds_verw_sel);
 .popsection
 
 THUNK warn_thunk_thunk, __warn_thunk
+
+#ifndef CONFIG_X86_64
+/*
+ * Clang's implementation of TLS stack cookies requires the variable in
+ * question to be a TLS variable. If the variable happens to be defined as an
+ * ordinary variable with external linkage in the same compilation unit (which
+ * amounts to the whole of vmlinux with LTO enabled), Clang will drop the
+ * segment register prefix from the references, resulting in broken code. Work
+ * around this by avoiding the symbol used in -mstack-protector-guard-symbol=
+ * entirely in the C code, and use an alias emitted by the linker script
+ * instead.
+ */
+#ifdef CONFIG_STACKPROTECTOR
+EXPORT_SYMBOL(__ref_stack_chk_guard);
+#endif
+#endif
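
For illustration, a minimal user-space sketch of the breakage the comment
describes (the file name, symbol name and function below are hypothetical,
not from the commit). Compiled with something like

  clang -m32 -O2 -fstack-protector-all -mstack-protector-guard-reg=fs \
        -mstack-protector-guard-symbol=guard_sym -c repro.c

the instrumented function is expected to load the cookie via %fs:guard_sym;
because the same translation unit also defines guard_sym as an ordinary
non-TLS global, Clang drops the %fs: prefix from those loads, which is the
miscompile the kernel sidesteps by providing the symbol only through the
linker script alias:

/* repro.c - hedged sketch, not kernel code */

/* The trigger: a plain definition with external linkage of the symbol
 * named in -mstack-protector-guard-symbol=, in the same translation
 * unit as the instrumented code. */
unsigned long guard_sym;

void copy_string(char *dst, const char *src)
{
	char buf[64];	/* on-stack array => stack protector kicks in */
	unsigned int i;

	for (i = 0; i < sizeof(buf) - 1 && src[i]; i++)
		buf[i] = src[i];
	buf[i] = '\0';

	for (i = 0; buf[i]; i++)
		dst[i] = buf[i];
	dst[i] = '\0';
}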

arch/x86/include/asm/asm-prototypes.h

@@ -20,3 +20,6 @@
 extern void cmpxchg8b_emu(void);
 #endif
 
+#if defined(__GENKSYMS__) && defined(CONFIG_STACKPROTECTOR)
+extern unsigned long __ref_stack_chk_guard;
+#endif
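
A note on the __GENKSYMS__ guard added above: genksyms defines that macro
while it parses sources to compute exported-symbol CRCs for
CONFIG_MODVERSIONS, so the extern declaration is visible only to that pass.
Regular C compilation never sees a declaration of __ref_stack_chk_guard at
all, which is the point of the workaround (see the entry.S comment). The
same pattern, sketched with a made-up symbol name:

/* Only the genksyms CRC pass sees this declaration; ordinary compilation
 * units get no C-level declaration of the symbol. my_lds_provided_sym is
 * a hypothetical name, for illustration only. */
#ifdef __GENKSYMS__
extern unsigned long my_lds_provided_sym;
#endif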

arch/x86/kernel/cpu/amd.c

@@ -924,6 +924,17 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
 {
 	if (!cpu_has(c, X86_FEATURE_HYPERVISOR))
 		msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
+
+	/*
+	 * These Zen4 SoCs advertise support for virtualized VMLOAD/VMSAVE
+	 * in some BIOS versions but they can lead to random host reboots.
+	 */
+	switch (c->x86_model) {
+	case 0x18 ... 0x1f:
+	case 0x60 ... 0x7f:
+		clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);
+		break;
+	}
 }
 
 static void init_amd_zen5(struct cpuinfo_x86 *c)
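
The case lines above use the GNU C case-range extension, so the switch in
init_amd_zen4() is equivalent to this open-coded sketch:

/* equivalent form of the Zen4 client model check above */
if ((c->x86_model >= 0x18 && c->x86_model <= 0x1f) ||
    (c->x86_model >= 0x60 && c->x86_model <= 0x7f))
	clear_cpu_cap(c, X86_FEATURE_V_VMSAVE_VMLOAD);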

arch/x86/kernel/cpu/common.c

@@ -2089,8 +2089,10 @@ void syscall_init(void)
 
 #ifdef CONFIG_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, __stack_chk_guard);
+#ifndef CONFIG_SMP
 EXPORT_PER_CPU_SYMBOL(__stack_chk_guard);
 #endif
+#endif
 
 #endif	/* CONFIG_X86_64 */

arch/x86/kernel/vmlinux.lds.S

@@ -491,6 +491,9 @@ SECTIONS
 	. = ASSERT((_end - LOAD_OFFSET <= KERNEL_IMAGE_SIZE),
 	   "kernel image bigger than KERNEL_IMAGE_SIZE");
 
+/* needed for Clang - see arch/x86/entry/entry.S */
+PROVIDE(__ref_stack_chk_guard = __stack_chk_guard);
+
 #ifdef CONFIG_X86_64
 /*
  * Per-cpu symbols which need to be offset from __per_cpu_load

arch/x86/mm/ioremap.c

@@ -656,7 +656,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			memunmap(data);
 			return true;
 		}
@@ -718,7 +719,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			early_memunmap(data, sizeof(*data));
 			return true;
 		}
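
Why sizeof(struct setup_data) is added in both checks: data->len counts only
the payload bytes, while the region occupied at paddr starts with the header
itself. The UAPI layout, as defined in arch/x86/include/uapi/asm/bootparam.h
(field comments here are editorial):

struct setup_data {
	__u64 next;	/* physical address of the next node, 0 at the end */
	__u32 type;
	__u32 len;	/* length of data[] only; the header is not counted */
	__u8 data[];
};

/*
 * A node therefore occupies [paddr, paddr + sizeof(struct setup_data) + len),
 * so the old "phys_addr < paddr + len" test missed addresses falling in the
 * tail of the blob. On AMD SME systems that let part of the ima_kexec buffer
 * be mapped with the wrong encryption attributes - the failure the first
 * bullet of the pull message describes.
 */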