mirror of https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-12-23 19:14:30 +08:00
commit ca1b66922a

Merge tag 'ras_updates_for_v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull RAS updates from Borislav Petkov:

 - Extend MCE recovery to processes which encounter an MCE while in
   kernel space copying from user memory, by sending them a SIGBUS on
   return to user space and unmapping the faulty memory, by Tony Luck
   and Youquan Song.

 - memcpy_mcsafe() rework by splitting the functionality into
   copy_mc_to_user() and copy_mc_to_kernel(). This enables support for
   new hardware which can recover from a machine check encountered
   during a fast string copy, makes that recovery the default, and lets
   older hardware which does not support it opt in to the old, fragile,
   slow variant, by Dan Williams.

 - New AMD hw enablement, by Yazen Ghannam and Akshay Gupta.

 - Do not use MSR-tracing accessors in #MC context and flag any fault
   while accessing MCA architectural MSRs as an architectural violation,
   with the hope that such hw/fw misdesigns are caught early during the
   hw eval phase and don't make it into production.

 - Misc fixes, improvements and cleanups, as always.
* tag 'ras_updates_for_v5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/mce: Allow for copy_mc_fragile symbol checksum to be generated
  x86/mce: Decode a kernel instruction to determine if it is copying from user
  x86/mce: Recover from poison found while copying from user space
  x86/mce: Avoid tail copy when machine check terminated a copy from user
  x86/mce: Add _ASM_EXTABLE_CPY for copy user access
  x86/mce: Provide method to find out the type of an exception handler
  x86/mce: Pass pointer to saved pt_regs to severity calculation routines
  x86/copy_mc: Introduce copy_mc_enhanced_fast_string()
  x86, powerpc: Rename memcpy_mcsafe() to copy_mc_to_{user, kernel}()
  x86/mce: Drop AMD-specific "DEFERRED" case from Intel severity rule list
  x86/mce: Add Skylake quirk for patrol scrub reported errors
  RAS/CEC: Convert to DEFINE_SHOW_ATTRIBUTE()
  x86/mce: Annotate mce_rd/wrmsrl() with noinstr
  x86/mce/dev-mcelog: Do not update kflags on AMD systems
  x86/mce: Stop mce_reign() from re-computing severity for every CPU
  x86/mce: Make mce_rdmsrl() panic on an inaccessible MSR
  x86/mce: Increase maximum number of banks to 64
  x86/mce: Delay clearing IA32_MCG_STATUS to the end of do_machine_check()
  x86/MCE/AMD, EDAC/mce_amd: Remove struct smca_hwid.xec_bitmap
  RAS/CEC: Fix cec_init() prototype
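The memcpy_mcsafe() rework above splits the machine-check-safe copy into copy_mc_to_kernel() and copy_mc_to_user(), both of which return the number of bytes left uncopied (0 on success). The fragment below is only an illustrative sketch of a caller; the helper name read_pmem_block() and the -EIO error policy are invented for the example and are not taken from this merge.

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Hypothetical example: read a block that may sit in poisoned memory. */
static int read_pmem_block(void *dst, const void *src, size_t len)
{
        unsigned long uncopied;

        /* Recoverable copy: a machine check aborts the copy instead of panicking. */
        uncopied = copy_mc_to_kernel(dst, src, len);
        if (uncopied)
                return -EIO;    /* the trailing 'uncopied' bytes were not transferred */

        return 0;
}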
191 lines · 4.6 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * User address space access functions.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 * Copyright 2002 Andi Kleen <ak@suse.de>
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

/*
 * Zero Userspace
 */

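/*
 * Zero @size bytes of user memory starting at @addr.  SMAP is toggled with
 * stac()/clac() around the access, and the return value is the number of
 * bytes that could not be cleared (0 on success).
 */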
unsigned long __clear_user(void __user *addr, unsigned long size)
{
        long __d0;
        might_fault();
        /* no memory constraint because it doesn't change any memory gcc knows
           about */
        stac();
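        /*
         * Clear eight bytes per iteration while %[size8] (in RCX) counts
         * whole qwords, then clear the remaining 0-7 bytes one at a time.
         * On a fault in the qword loop, the fixup at 3: recomputes the
         * number of bytes still left to clear so it can be returned.
         */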
        asm volatile(
                " testq %[size8],%[size8]\n"
                " jz 4f\n"
                " .align 16\n"
                "0: movq $0,(%[dst])\n"
                " addq $8,%[dst]\n"
                " decl %%ecx ; jnz 0b\n"
                "4: movq %[size1],%%rcx\n"
                " testl %%ecx,%%ecx\n"
                " jz 2f\n"
                "1: movb $0,(%[dst])\n"
                " incq %[dst]\n"
                " decl %%ecx ; jnz 1b\n"
                "2:\n"
                ".section .fixup,\"ax\"\n"
                "3: lea 0(%[size1],%[size8],8),%[size8]\n"
                " jmp 2b\n"
                ".previous\n"
                _ASM_EXTABLE_UA(0b, 3b)
                _ASM_EXTABLE_UA(1b, 2b)
                : [size8] "=&c"(size), [dst] "=&D" (__d0)
                : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr));
        clac();
        return size;
}
EXPORT_SYMBOL(__clear_user);

unsigned long clear_user(void __user *to, unsigned long n)
{
        if (access_ok(to, n))
                return __clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
/**
 * clean_cache_range - write back a cache range with CLWB
 * @addr: virtual start address
 * @size: number of bytes to write back
 *
 * Write back a cache range using the CLWB (cache line write back)
 * instruction. Note that @size is internally rounded up to be cache
 * line size aligned.
 */
static void clean_cache_range(void *addr, size_t size)
{
        u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
        unsigned long clflush_mask = x86_clflush_size - 1;
        void *vend = addr + size;
        void *p;

        for (p = (void *)((unsigned long)addr & ~clflush_mask);
             p < vend; p += x86_clflush_size)
                clwb(p);
}

void arch_wb_cache_pmem(void *addr, size_t size)
{
        clean_cache_range(addr, size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

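/*
 * Copy from user space so that the destination range does not linger dirty
 * in the CPU cache: __copy_user_nocache() writes the aligned bulk with
 * non-temporal stores, and any cache lines touched by an unaligned head or
 * tail are written back explicitly below.
 */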
long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
{
        unsigned long flushed, dest = (unsigned long) dst;
        long rc = __copy_user_nocache(dst, src, size, 0);

        /*
         * __copy_user_nocache() uses non-temporal stores for the bulk
         * of the transfer, but we need to manually flush if the
         * transfer is unaligned. A cached memory copy is used when
         * destination or size is not naturally aligned. That is:
         *   - Require 8-byte alignment when size is 8 bytes or larger.
         *   - Require 4-byte alignment when size is 4 bytes.
         */
        if (size < 8) {
                if (!IS_ALIGNED(dest, 4) || size != 4)
                        clean_cache_range(dst, size);
        } else {
                if (!IS_ALIGNED(dest, 8)) {
                        dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
                        clean_cache_range(dst, 1);
                }

                flushed = dest - (unsigned long) dst;
                if (size > flushed && !IS_ALIGNED(size - flushed, 8))
                        clean_cache_range(dst + size - 1, 1);
        }

        return rc;
}

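/*
 * memcpy() variant that avoids leaving the destination dirty in the cache:
 * a small cached copy (followed by CLWB) aligns the destination to 8 bytes,
 * MOVNTI non-temporal stores handle the aligned bulk, and any remaining
 * tail bytes are again copied through the cache and written back.
 */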
void __memcpy_flushcache(void *_dst, const void *_src, size_t size)
{
        unsigned long dest = (unsigned long) _dst;
        unsigned long source = (unsigned long) _src;

        /* cache copy and flush to align dest */
        if (!IS_ALIGNED(dest, 8)) {
                unsigned len = min_t(unsigned, size, ALIGN(dest, 8) - dest);

                memcpy((void *) dest, (void *) source, len);
                clean_cache_range((void *) dest, len);
                dest += len;
                source += len;
                size -= len;
                if (!size)
                        return;
        }

        /* 4x8 movnti loop */
        while (size >= 32) {
                asm("movq (%0), %%r8\n"
                    "movq 8(%0), %%r9\n"
                    "movq 16(%0), %%r10\n"
                    "movq 24(%0), %%r11\n"
                    "movnti %%r8, (%1)\n"
                    "movnti %%r9, 8(%1)\n"
                    "movnti %%r10, 16(%1)\n"
                    "movnti %%r11, 24(%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8", "r9", "r10", "r11");
                dest += 32;
                source += 32;
                size -= 32;
        }

        /* 1x8 movnti loop */
        while (size >= 8) {
                asm("movq (%0), %%r8\n"
                    "movnti %%r8, (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 8;
                source += 8;
                size -= 8;
        }

        /* 1x4 movnti loop */
        while (size >= 4) {
                asm("movl (%0), %%r8d\n"
                    "movnti %%r8d, (%1)\n"
                    :: "r" (source), "r" (dest)
                    : "memory", "r8");
                dest += 4;
                source += 4;
                size -= 4;
        }

        /* cache copy for remaining bytes */
        if (size) {
                memcpy((void *) dest, (void *) source, size);
                clean_cache_range((void *) dest, size);
        }
}
EXPORT_SYMBOL_GPL(__memcpy_flushcache);

void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
                size_t len)
{
        char *from = kmap_atomic(page);

        memcpy_flushcache(to, from + offset, len);
        kunmap_atomic(from);
}
#endif