mirror of
https://mirrors.bfsu.edu.cn/git/linux.git
synced 2024-11-24 20:54:10 +08:00
0f61f6be1f
Now that we have SYM_FUNC_ALIAS() and SYM_FUNC_ALIAS_WEAK(), use those to simplify and more consistently define function aliases across arch/arm64. Aliases are now defined in terms of a canonical function name. For position-independent functions I've made the __pi_<func> name the canonical name, and defined other aliases in terms of this. The SYM_FUNC_{START,END}_PI(func) macros obscure the __pi_<func> name, and make this hard to search for. The SYM_FUNC_START_WEAK_PI() macro also obscures the fact that the __pi_<func> symbol is global and the <func> symbol is weak. For clarity, I have removed these macros and used SYM_FUNC_{START,END}() directly with the __pi_<func> name. For example: SYM_FUNC_START_WEAK_PI(func) ... asm insns ... SYM_FUNC_END_PI(func) EXPORT_SYMBOL(func) ... becomes: SYM_FUNC_START(__pi_func) ... asm insns ... SYM_FUNC_END(__pi_func) SYM_FUNC_ALIAS_WEAK(func, __pi_func) EXPORT_SYMBOL(func) For clarity, where there are multiple annotations such as EXPORT_SYMBOL(), I've tried to keep annotations grouped by symbol. For example, where a function has a name and an alias which are both exported, this is organised as: SYM_FUNC_START(func) ... asm insns ... SYM_FUNC_END(func) EXPORT_SYMBOL(func) SYM_FUNC_ALIAS(alias, func) EXPORT_SYMBOL(alias) For consistency with the other string functions, I've defined strrchr as a position-independent function, as it can safely be used as such even though we have no users today. As we no longer use SYM_FUNC_{START,END}_ALIAS(), our local copies are removed. The common versions will be removed by a subsequent patch. There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com> Acked-by: Ard Biesheuvel <ardb@kernel.org> Acked-by: Catalin Marinas <catalin.marinas@arm.com> Acked-by: Josh Poimboeuf <jpoimboe@redhat.com> Acked-by: Mark Brown <broonie@kernel.org> Cc: Joey Gouly <joey.gouly@arm.com> Cc: Will Deacon <will@kernel.org> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Link: https://lore.kernel.org/r/20220216162229.1076788-3-mark.rutland@arm.com Signed-off-by: Will Deacon <will@kernel.org>
163 lines
4.2 KiB
ArmAsm
163 lines
4.2 KiB
ArmAsm
/* SPDX-License-Identifier: GPL-2.0-only */
|
|
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and can be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 */
|
|
|
|
#include <linux/linkage.h>
|
|
#include <asm/assembler.h>
|
|
|
|
/*
|
|
* determine the length of a fixed-size string
|
|
*
|
|
* Parameters:
|
|
* x0 - const string pointer
|
|
* x1 - maximal string length
|
|
* Returns:
|
|
* x0 - the return length of specific string
|
|
*/
|
|
|
|
/* Arguments and results. */
|
|
srcin .req x0
|
|
len .req x0
|
|
limit .req x1
|
|
|
|
/* Locals and temporaries. */
|
|
src .req x2
|
|
data1 .req x3
|
|
data2 .req x4
|
|
data2a .req x5
|
|
has_nul1 .req x6
|
|
has_nul2 .req x7
|
|
tmp1 .req x8
|
|
tmp2 .req x9
|
|
tmp3 .req x10
|
|
tmp4 .req x11
|
|
zeroones .req x12
|
|
pos .req x13
|
|
limit_wd .req x14
|
|
|
|
#define REP8_01 0x0101010101010101
|
|
#define REP8_7f 0x7f7f7f7f7f7f7f7f
|
|
#define REP8_80 0x8080808080808080
|
|
|
|
SYM_FUNC_START(__pi_strnlen)
|
|
cbz limit, .Lhit_limit
|
|
mov zeroones, #REP8_01
|
|
bic src, srcin, #15
|
|
ands tmp1, srcin, #15
|
|
b.ne .Lmisaligned
|
|
/* Calculate the number of full and partial words -1. */
|
|
sub limit_wd, limit, #1 /* Limit != 0, so no underflow. */
|
|
lsr limit_wd, limit_wd, #4 /* Convert to Qwords. */
|
|
|
|
/*
|
|
* NUL detection works on the principle that (X - 1) & (~X) & 0x80
|
|
* (=> (X - 1) & ~(X | 0x7f)) is non-zero iff a byte is zero, and
|
|
* can be done in parallel across the entire word.
|
|
*/
|
|
/*
|
|
* The inner loop deals with two Dwords at a time. This has a
|
|
* slightly higher start-up cost, but we should win quite quickly,
|
|
* especially on cores with a high number of issue slots per
|
|
* cycle, as we get much better parallelism out of the operations.
|
|
*/
|
|
.Lloop:
|
|
ldp data1, data2, [src], #16
|
|
.Lrealigned:
|
|
sub tmp1, data1, zeroones
|
|
orr tmp2, data1, #REP8_7f
|
|
sub tmp3, data2, zeroones
|
|
orr tmp4, data2, #REP8_7f
|
|
bic has_nul1, tmp1, tmp2
|
|
bic has_nul2, tmp3, tmp4
|
|
subs limit_wd, limit_wd, #1
|
|
orr tmp1, has_nul1, has_nul2
|
|
ccmp tmp1, #0, #0, pl /* NZCV = 0000 */
|
|
b.eq .Lloop
|
|
|
|
cbz tmp1, .Lhit_limit /* No null in final Qword. */
|
|
|
|
/*
|
|
* We know there's a null in the final Qword. The easiest thing
|
|
* to do now is work out the length of the string and return
|
|
* MIN (len, limit).
|
|
*/
|
|
sub len, src, srcin
|
|
cbz has_nul1, .Lnul_in_data2
|
|
CPU_BE( mov data2, data1 ) /*perpare data to re-calculate the syndrome*/
|
|
|
|
sub len, len, #8
|
|
mov has_nul2, has_nul1
|
|
.Lnul_in_data2:
|
|
/*
|
|
* For big-endian, carry propagation (if the final byte in the
|
|
* string is 0x01) means we cannot use has_nul directly. The
|
|
* easiest way to get the correct byte is to byte-swap the data
|
|
* and calculate the syndrome a second time.
|
|
*/
|
|
CPU_BE( rev data2, data2 )
|
|
CPU_BE( sub tmp1, data2, zeroones )
|
|
CPU_BE( orr tmp2, data2, #REP8_7f )
|
|
CPU_BE( bic has_nul2, tmp1, tmp2 )
|
|
|
|
sub len, len, #8
|
|
rev has_nul2, has_nul2
|
|
clz pos, has_nul2
|
|
add len, len, pos, lsr #3 /* Bits to bytes. */
|
|
cmp len, limit
|
|
csel len, len, limit, ls /* Return the lower value. */
|
|
ret
|
|
|
|
.Lmisaligned:
|
|
/*
|
|
* Deal with a partial first word.
|
|
* We're doing two things in parallel here;
|
|
* 1) Calculate the number of words (but avoiding overflow if
|
|
* limit is near ULONG_MAX) - to do this we need to work out
|
|
* limit + tmp1 - 1 as a 65-bit value before shifting it;
|
|
* 2) Load and mask the initial data words - we force the bytes
|
|
* before the ones we are interested in to 0xff - this ensures
|
|
* early bytes will not hit any zero detection.
|
|
*/
|
|
ldp data1, data2, [src], #16
|
|
|
|
sub limit_wd, limit, #1
|
|
and tmp3, limit_wd, #15
|
|
lsr limit_wd, limit_wd, #4
|
|
|
|
add tmp3, tmp3, tmp1
|
|
add limit_wd, limit_wd, tmp3, lsr #4
|
|
|
|
neg tmp4, tmp1
|
|
lsl tmp4, tmp4, #3 /* Bytes beyond alignment -> bits. */
|
|
|
|
mov tmp2, #~0
|
|
/* Big-endian. Early bytes are at MSB. */
|
|
CPU_BE( lsl tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
|
|
/* Little-endian. Early bytes are at LSB. */
|
|
CPU_LE( lsr tmp2, tmp2, tmp4 ) /* Shift (tmp1 & 63). */
|
|
|
|
cmp tmp1, #8
|
|
|
|
orr data1, data1, tmp2
|
|
orr data2a, data2, tmp2
|
|
|
|
csinv data1, data1, xzr, le
|
|
csel data2, data2, data2a, le
|
|
b .Lrealigned
|
|
|
|
.Lhit_limit:
|
|
mov len, limit
|
|
ret
|
|
SYM_FUNC_END(__pi_strnlen)
|
|
|
|
SYM_FUNC_ALIAS_WEAK(strnlen, __pi_strnlen)
|
|
EXPORT_SYMBOL_NOKASAN(strnlen)
|