commit bdb7de7ed5

[ Upstream commit 066baf92be ]

copy_mc_to_user() has the destination marked __user on powerpc, but not
on x86; the latter results in a sparse warning in lib/iov_iter.c.

Fix this by applying the tag on x86 too.

Fixes: ec6347bb43 ("x86, powerpc: Rename memcpy_mcsafe() to copy_mc_to_{user, kernel}()")
Signed-off-by: David Howells <dhowells@redhat.com>
Link: https://lore.kernel.org/r/20230925120309.1731676-3-dhowells@redhat.com
cc: Dan Williams <dan.j.williams@intel.com>
cc: Thomas Gleixner <tglx@linutronix.de>
cc: Ingo Molnar <mingo@redhat.com>
cc: Borislav Petkov <bp@alien8.de>
cc: Dave Hansen <dave.hansen@linux.intel.com>
cc: "H. Peter Anvin" <hpa@zytor.com>
cc: Alexander Viro <viro@zeniv.linux.org.uk>
cc: Jens Axboe <axboe@kernel.dk>
cc: Christoph Hellwig <hch@lst.de>
cc: Christian Brauner <christian@brauner.io>
cc: Matthew Wilcox <willy@infradead.org>
cc: Linus Torvalds <torvalds@linux-foundation.org>
cc: David Laight <David.Laight@ACULAB.COM>
cc: x86@kernel.org
cc: linux-block@vger.kernel.org
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
Signed-off-by: Christian Brauner <brauner@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2016-2020 Intel Corporation. All rights reserved. */

#include <linux/jump_label.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/mce.h>

#ifdef CONFIG_X86_MCE
/*
 * See COPY_MC_TEST for self-test of the copy_mc_fragile()
 * implementation.
 */
static DEFINE_STATIC_KEY_FALSE(copy_mc_fragile_key);

void enable_copy_mc_fragile(void)
{
	static_branch_inc(&copy_mc_fragile_key);
}
#define copy_mc_fragile_enabled (static_branch_unlikely(&copy_mc_fragile_key))

/*
 * Similar to copy_user_handle_tail, probe for the write fault point, or
 * source exception point.
 */
__visible notrace unsigned long
copy_mc_fragile_handle_tail(char *to, char *from, unsigned len)
{
	for (; len; --len, to++, from++)
		if (copy_mc_fragile(to, from, 1))
			break;
	return len;
}
#else
/*
 * No point in doing careful copying, or consulting a static key when
 * there is no #MC handler in the CONFIG_X86_MCE=n case.
 */
void enable_copy_mc_fragile(void)
{
}
#define copy_mc_fragile_enabled (0)
#endif

unsigned long copy_mc_enhanced_fast_string(void *dst, const void *src, unsigned len);

/**
 * copy_mc_to_kernel - memory copy that handles source exceptions
 *
 * @dst: destination address
 * @src: source address
 * @len: number of bytes to copy
 *
 * Call into the 'fragile' version on systems that benefit from avoiding
 * corner case poison consumption scenarios. For example, accessing
 * poison across 2 cachelines with a single instruction. Almost all
 * other use cases can use copy_mc_enhanced_fast_string() for a fast
 * recoverable copy, or fall back to plain memcpy.
 *
 * Return 0 for success, or number of bytes not copied if there was an
 * exception.
 */
unsigned long __must_check copy_mc_to_kernel(void *dst, const void *src, unsigned len)
{
	if (copy_mc_fragile_enabled)
		return copy_mc_fragile(dst, src, len);
	if (static_cpu_has(X86_FEATURE_ERMS))
		return copy_mc_enhanced_fast_string(dst, src, len);
	memcpy(dst, src, len);
	return 0;
}
EXPORT_SYMBOL_GPL(copy_mc_to_kernel);

unsigned long __must_check copy_mc_to_user(void __user *dst, const void *src, unsigned len)
{
	unsigned long ret;

	if (copy_mc_fragile_enabled) {
		__uaccess_begin();
		ret = copy_mc_fragile((__force void *)dst, src, len);
		__uaccess_end();
		return ret;
	}

	if (static_cpu_has(X86_FEATURE_ERMS)) {
		__uaccess_begin();
		ret = copy_mc_enhanced_fast_string((__force void *)dst, src, len);
		__uaccess_end();
		return ret;
	}

	return copy_user_generic((__force void *)dst, src, len);
}
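As the kernel-doc above states, both copiers return 0 on success or the number of bytes left uncopied when a machine check on the source or a write fault on the destination stops the copy. A hypothetical caller sketch, not part of this file, with the function name and error handling chosen only for illustration (assumes the usual <linux/uaccess.h> and <linux/errno.h> includes):

static int example_read_into_user(void __user *ubuf, const void *kbuf, size_t len)
{
	unsigned long rem;

	rem = copy_mc_to_user(ubuf, kbuf, len);	/* 0 on success, else bytes not copied */
	if (rem)
		return -EFAULT;			/* treat a partial copy as failure */
	return 0;
}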