Merge tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Borislav Petkov:

 - Prevent an infinite loop in the MCE recovery on return to user space,
   which was caused by a second MCE queueing work for the same page and
   thereby creating a circular work list (a minimal sketch of this
   failure mode follows the shortlog below).

 - Make kern_addr_valid() correctly handle existing PMD entries that are
   marked not-present in the higher-level page table, instead of blindly
   dereferencing them.

 - Pass a valid address to sanitize_phys(). The bug was caused by mixing
   inclusive and exclusive ranges: memtype_reserve() expects 'end' to be
   exclusive, while sanitize_phys() wants it inclusive. This worked so
   far, but with 'end' being the end of the physical address space the
   failure is exposed.

 - Increase the maximum supported GPIO number for 64-bit. Newer SoCs
   exceed the previous maximum.

* tag 'x86_urgent_for_v5.15_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Avoid infinite loop for copy from user recovery
  x86/mm: Fix kern_addr_valid() to cope with existing but not present entries
  x86/platform: Increase maximum GPIO number for X86_64
  x86/pat: Pass valid address to sanitize_phys()
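
The first fix above concerns the kernel's task_work list: pending callbacks
are chained through singly linked callback_head nodes, so queueing the same
node twice links it to itself and the dequeue loop on return to user space
never terminates. Below is a minimal user-space sketch of that failure mode;
the list code is an illustrative stand-in, not the kernel's task_work
implementation.

#include <stdio.h>

/* Illustrative stand-in for the kernel's callback_head */
struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

static struct callback_head *head;

/* Push a node onto the pending list (simplified task_work_add()) */
static void work_add(struct callback_head *work)
{
	work->next = head;
	head = work;
}

static void report(struct callback_head *w)
{
	(void)w;
	printf("work runs\n");
}

int main(void)
{
	struct callback_head mce_kill_me = { .next = NULL, .func = report };

	work_add(&mce_kill_me);
	work_add(&mce_kill_me);	/* second MCE queues the SAME node: next now points at itself */

	/* Simplified task_work_run(): spins forever on the self-linked node */
	int steps = 0;
	for (struct callback_head *w = head; w; w = w->next) {
		w->func(w);
		if (++steps > 3) {	/* bail out so the demo terminates */
			printf("cycle detected, would loop forever\n");
			break;
		}
	}
	return 0;
}

The actual fix sidesteps this by calling task_work_add() only for the first
MCE hitting a task, via the mce_count guard visible in the core.c hunk below.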
Linus Torvalds, 2021-09-19 13:29:36 -07:00
commit 20621d2f27
5 changed files with 47 additions and 15 deletions

arch/x86/Kconfig

@@ -339,6 +339,11 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
 config ARCH_HIBERNATION_POSSIBLE
 	def_bool y
 
+config ARCH_NR_GPIO
+	int
+	default 1024 if X86_64
+	default 512
+
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y

arch/x86/kernel/cpu/mce/core.c

@@ -1253,6 +1253,9 @@ static void __mc_scan_banks(struct mce *m, struct pt_regs *regs, struct mce *fin
 static void kill_me_now(struct callback_head *ch)
 {
+	struct task_struct *p = container_of(ch, struct task_struct, mce_kill_me);
+
+	p->mce_count = 0;
 	force_sig(SIGBUS);
 }
@@ -1262,6 +1265,7 @@ static void kill_me_maybe(struct callback_head *cb)
 	int flags = MF_ACTION_REQUIRED;
 	int ret;
 
+	p->mce_count = 0;
 	pr_err("Uncorrected hardware memory error in user-access at %llx", p->mce_addr);
 
 	if (!p->mce_ripv)
@@ -1290,17 +1294,34 @@ static void kill_me_maybe(struct callback_head *cb)
 	}
 }
 
-static void queue_task_work(struct mce *m, int kill_current_task)
+static void queue_task_work(struct mce *m, char *msg, int kill_current_task)
 {
-	current->mce_addr = m->addr;
-	current->mce_kflags = m->kflags;
-	current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
-	current->mce_whole_page = whole_page(m);
+	int count = ++current->mce_count;
 
-	if (kill_current_task)
-		current->mce_kill_me.func = kill_me_now;
-	else
-		current->mce_kill_me.func = kill_me_maybe;
+	/* First call, save all the details */
+	if (count == 1) {
+		current->mce_addr = m->addr;
+		current->mce_kflags = m->kflags;
+		current->mce_ripv = !!(m->mcgstatus & MCG_STATUS_RIPV);
+		current->mce_whole_page = whole_page(m);
+
+		if (kill_current_task)
+			current->mce_kill_me.func = kill_me_now;
+		else
+			current->mce_kill_me.func = kill_me_maybe;
+	}
+
+	/* Ten is likely overkill. Don't expect more than two faults before task_work() */
+	if (count > 10)
+		mce_panic("Too many consecutive machine checks while accessing user data", m, msg);
+
+	/* Second or later call, make sure page address matches the one from first call */
+	if (count > 1 && (current->mce_addr >> PAGE_SHIFT) != (m->addr >> PAGE_SHIFT))
+		mce_panic("Consecutive machine checks to different user pages", m, msg);
+
+	/* Do not call task_work_add() more than once */
+	if (count > 1)
+		return;
 
 	task_work_add(current, &current->mce_kill_me, TWA_RESUME);
 }
@@ -1438,7 +1459,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
-		queue_task_work(&m, kill_current_task);
+		queue_task_work(&m, msg, kill_current_task);
 
 	} else {
 		/*
@@ -1456,7 +1477,7 @@ noinstr void do_machine_check(struct pt_regs *regs)
 		}
 
 		if (m.kflags & MCE_IN_KERNEL_COPYIN)
-			queue_task_work(&m, kill_current_task);
+			queue_task_work(&m, msg, kill_current_task);
 	}
 out:
 	mce_wrmsrl(MSR_IA32_MCG_STATUS, 0);
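
Note that the same-page guard in queue_task_work() compares page frame
numbers, not raw addresses: two faulting addresses anywhere inside one base
page count as the same poisoned page. A small user-space illustration of that
check, with PAGE_SHIFT hard-coded to 12 to match x86's 4K base pages:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4K pages, as on x86 */

/* Two addresses hit the same page iff their page frame numbers match */
static int same_page(uint64_t a, uint64_t b)
{
	return (a >> PAGE_SHIFT) == (b >> PAGE_SHIFT);
}

int main(void)
{
	/* 0x1000..0x1fff is one page; 0x2000 starts the next */
	printf("%d\n", same_page(0x1a30, 0x1ff8));	/* 1: same page    */
	printf("%d\n", same_page(0x1ff8, 0x2000));	/* 0: page differs */
	return 0;
}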

arch/x86/mm/init_64.c

@@ -1432,18 +1432,18 @@ int kern_addr_valid(unsigned long addr)
 		return 0;
 
 	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d))
+	if (!p4d_present(*p4d))
 		return 0;
 
 	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud))
+	if (!pud_present(*pud))
 		return 0;
 
 	if (pud_large(*pud))
 		return pfn_valid(pud_pfn(*pud));
 
 	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	if (!pmd_present(*pmd))
 		return 0;
 
 	if (pmd_large(*pmd))
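
The switch from pXd_none() to !pXd_present() matters because a page-table
entry can be non-empty yet have its Present bit clear, and walking through
such an entry dereferences garbage. A deliberately simplified user-space
model follows; real x86 pmd_present() also accepts PROTNONE and PSE
encodings, which this sketch ignores.

#include <stdio.h>
#include <stdint.h>

/* Simplified model of an x86 page-table entry: bit 0 is the Present bit */
#define _PAGE_PRESENT 0x1ULL

typedef uint64_t pmdval_t;

static int pmd_none(pmdval_t pmd)    { return pmd == 0; }
static int pmd_present(pmdval_t pmd) { return (pmd & _PAGE_PRESENT) != 0; }

int main(void)
{
	pmdval_t empty    = 0x0;			/* nothing mapped     */
	pmdval_t mapped   = 0x1000 | _PAGE_PRESENT;	/* normal mapping     */
	pmdval_t existing = 0x1000;			/* exists, !present   */

	/* The old pmd_none() check lets the "existing" entry through... */
	printf("none:    %d %d %d\n",
	       pmd_none(empty), pmd_none(mapped), pmd_none(existing));
	/* ...while !pmd_present() correctly rejects it as well */
	printf("present: %d %d %d\n",
	       pmd_present(empty), pmd_present(mapped), pmd_present(existing));
	return 0;
}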

arch/x86/mm/pat/memtype.c

@@ -583,7 +583,12 @@ int memtype_reserve(u64 start, u64 end, enum page_cache_mode req_type,
 	int err = 0;
 
 	start = sanitize_phys(start);
-	end = sanitize_phys(end);
+
+	/*
+	 * The end address passed into this function is exclusive, but
+	 * sanitize_phys() expects an inclusive address.
+	 */
+	end = sanitize_phys(end - 1) + 1;
 	if (start >= end) {
 		WARN(1, "%s failed: [mem %#010Lx-%#010Lx], req %s\n", __func__,
 			start, end - 1, cattr_name(req_type));
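
The conversion above only shows its worth at the very top of the physical
address space: sanitize_phys() masks its argument down to the valid
physical-address width, so an exclusive 'end' one past the last valid byte
wraps to zero and the start >= end check fires. A user-space sketch of the
off-by-one; the 46-bit width and the plain mask are illustrative stand-ins
for the kernel's __PHYSICAL_MASK:

#include <stdio.h>
#include <stdint.h>

/* Illustrative 46-bit physical address width */
#define PHYS_BITS 46
#define PHYSICAL_MASK ((1ULL << PHYS_BITS) - 1)

/* Mask-based model of sanitize_phys(): expects an INCLUSIVE address */
static uint64_t sanitize_phys(uint64_t address)
{
	return address & PHYSICAL_MASK;
}

int main(void)
{
	/* Exclusive end at the very top of the physical address space */
	uint64_t end = 1ULL << PHYS_BITS;

	/* Old: masking the exclusive end wraps it to 0 */
	printf("broken: %#llx\n", (unsigned long long)sanitize_phys(end));

	/* Fixed: convert to inclusive, sanitize, convert back */
	printf("fixed:  %#llx\n",
	       (unsigned long long)(sanitize_phys(end - 1) + 1));
	return 0;
}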

include/linux/sched.h

@@ -1471,6 +1471,7 @@ struct task_struct {
 					mce_whole_page : 1,
 					__mce_reserved : 62;
 	struct callback_head		mce_kill_me;
+	int				mce_count;
 #endif
 
 #ifdef CONFIG_KRETPROBES