Modern compilers are perfectly capable of extracting parallelism from
the XOR routines, provided that the prototypes reflect the nature of the
input accurately, in particular the fact that the input vectors are
expected not to overlap. This is not documented explicitly, but is
implied by the interchangeability of the various C routines, some of
which use temporary variables while others don't: this means that these
routines only behave identically for non-overlapping inputs.

So let's decorate these input vectors with the __restrict modifier,
which informs the compiler that there is no overlap. While at it, make
the input-only vectors pointer-to-const as well.

Tested-by: Nathan Chancellor <nathan@kernel.org>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
Link: https://github.com/ClangBuiltLinux/linux/issues/563
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
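To illustrate the point, here is a minimal sketch of the kind of C XOR routine the commit message refers to; it is not part of the kernel header reproduced below, and xor_8regs_sketch is a made-up name. With __restrict in the prototype the compiler may assume p1 and p2 never alias, so each loop iteration is independent and can be auto-vectorized (e.g. into NEON loads, EORs and stores on arm64).

#include <stddef.h>

static void
xor_8regs_sketch(unsigned long bytes, unsigned long * __restrict p1,
		 const unsigned long * __restrict p2)
{
	size_t lines = bytes / sizeof(unsigned long);
	size_t i;

	/*
	 * No aliasing between p1 and p2: iterations are independent,
	 * so the compiler is free to unroll and vectorize this loop.
	 */
	for (i = 0; i < lines; i++)
		p1[i] ^= p2[i];
}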
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/xor.h
 *
 * Authors: Jackie Liu <liuyun01@kylinos.cn>
 * Copyright (C) 2018,Tianjin KYLIN Information Technology Co., Ltd.
 */

#include <linux/hardirq.h>
#include <asm-generic/xor.h>
#include <asm/hwcap.h>
#include <asm/neon.h>

#ifdef CONFIG_KERNEL_MODE_NEON

extern struct xor_block_template const xor_block_inner_neon;

static void
xor_neon_2(unsigned long bytes, unsigned long * __restrict p1,
	   const unsigned long * __restrict p2)
{
	kernel_neon_begin();
	xor_block_inner_neon.do_2(bytes, p1, p2);
	kernel_neon_end();
}

static void
xor_neon_3(unsigned long bytes, unsigned long * __restrict p1,
	   const unsigned long * __restrict p2,
	   const unsigned long * __restrict p3)
{
	kernel_neon_begin();
	xor_block_inner_neon.do_3(bytes, p1, p2, p3);
	kernel_neon_end();
}

static void
xor_neon_4(unsigned long bytes, unsigned long * __restrict p1,
	   const unsigned long * __restrict p2,
	   const unsigned long * __restrict p3,
	   const unsigned long * __restrict p4)
{
	kernel_neon_begin();
	xor_block_inner_neon.do_4(bytes, p1, p2, p3, p4);
	kernel_neon_end();
}

static void
xor_neon_5(unsigned long bytes, unsigned long * __restrict p1,
	   const unsigned long * __restrict p2,
	   const unsigned long * __restrict p3,
	   const unsigned long * __restrict p4,
	   const unsigned long * __restrict p5)
{
	kernel_neon_begin();
	xor_block_inner_neon.do_5(bytes, p1, p2, p3, p4, p5);
	kernel_neon_end();
}

static struct xor_block_template xor_block_arm64 = {
	.name = "arm64_neon",
	.do_2 = xor_neon_2,
	.do_3 = xor_neon_3,
	.do_4 = xor_neon_4,
	.do_5 = xor_neon_5
};
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
	do {					\
		xor_speed(&xor_block_8regs);	\
		xor_speed(&xor_block_32regs);	\
		if (cpu_has_neon()) {		\
			xor_speed(&xor_block_arm64);\
		}				\
	} while (0)

#endif /* ! CONFIG_KERNEL_MODE_NEON */