linux/arch/riscv/lib/memcpy.S
Alexandre Ghiti 26e7aacb83
riscv: Allow to downgrade paging mode from the command line
Add two early command-line parameters that allow downgrading the satp mode
(using the same naming as x86):
- "no5lvl": use a 4-level page table (down from sv57 to sv48)
- "no4lvl": use a 3-level page table (down from sv57/sv48 to sv39)

Note that going through the device tree to get the kernel command line
works with ACPI too, since the EFI stub creates a device tree containing
the command line anyway.
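
As a rough illustration of that path, a sketch using the stock libfdt API
(not the exact code added here) to pull the command line out of /chosen:

#include <libfdt.h>

/*
 * Sketch: fetch the kernel command line from the /chosen node of the
 * device tree handed over by the firmware/EFI stub.
 */
static const char *early_cmdline(const void *fdt)
{
	int chosen = fdt_path_offset(fdt, "/chosen");

	if (chosen < 0)
		return NULL;

	return fdt_getprop(fdt, chosen, "bootargs", NULL);
}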

In KASAN kernels, we can't use libfdt that early in the boot process
since we are not yet ready to execute instrumented functions. So instead
of using the "generic" libfdt, we compile our own versions of those
functions, which are not instrumented and are prefixed so that they do
not conflict with the generic ones. We also need non-instrumented
versions of the string functions and prefixed versions of memcpy/memmove.
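
Conceptually, the early boot objects then reference only prefixed,
uninstrumented symbols. Below is a sketch of what a consumer could look
like; the prototypes are assumptions, and only __pi_memcpy/__pi___memcpy
are actually visible in the file shown further down:

#include <stddef.h>

/*
 * __pi_memcpy is provided by the SYM_FUNC_ALIAS() lines at the end of
 * memcpy.S below; __pi_fdt_getprop stands in for the prefixed libfdt
 * build and is hypothetical here.
 */
void *__pi_memcpy(void *dst, const void *src, size_t n);
const void *__pi_fdt_getprop(const void *fdt, int nodeoffset,
			     const char *name, int *lenp);

static void copy_bootargs(char *dst, size_t dst_size,
			  const void *fdt, int chosen)
{
	int len = 0;
	const char *args = __pi_fdt_getprop(fdt, chosen, "bootargs", &len);

	/* Only uninstrumented code runs here, so KASAN stays out of the way */
	if (args && len > 0 && (size_t)len <= dst_size)
		__pi_memcpy(dst, args, len);
}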

This is largely inspired by commit aacd149b62 ("arm64: head: avoid
relocating the kernel twice for KASLR"), from which I removed the
compilation flags that are not relevant to RISC-V at the moment (LTO,
SCS). Also note that we have to link with -z norelro to keep ld.lld from
emitting a warning about the new .got sections, as in commit 311bea3cb9
("arm64: link with -z norelro for LLD or aarch64-elf").

Signed-off-by: Alexandre Ghiti <alexghiti@rivosinc.com>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Reviewed-by: Björn Töpel <bjorn@rivosinc.com>
Link: https://lore.kernel.org/r/20230424092313.178699-2-alexghiti@rivosinc.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
2023-04-26 07:30:52 -07:00

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 Regents of the University of California
 */

#include <linux/linkage.h>
#include <asm/asm.h>

/* void *memcpy(void *, const void *, size_t) */
ENTRY(__memcpy)
WEAK(memcpy)
	move t6, a0  /* Preserve return value */

	/* Defer to byte-oriented copy for small sizes */
	sltiu a3, a2, 128
	bnez a3, 4f
	/* Use word-oriented copy only if low-order bits match */
	andi a3, t6, SZREG-1
	andi a4, a1, SZREG-1
	bne a3, a4, 4f

	beqz a3, 2f  /* Skip if already aligned */
	/*
	 * Round to nearest double word-aligned address
	 * greater than or equal to start address
	 */
	andi a3, a1, ~(SZREG-1)
	addi a3, a3, SZREG
	/* Handle initial misalignment */
	sub a4, a3, a1
1:
	lb a5, 0(a1)
	addi a1, a1, 1
	sb a5, 0(t6)
	addi t6, t6, 1
	bltu a1, a3, 1b
	sub a2, a2, a4  /* Update count */
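
	/* Copy 16*SZREG-byte blocks in the unrolled loop below while full blocks remain */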
2:
	andi a4, a2, ~((16*SZREG)-1)
	beqz a4, 4f
	add a3, a1, a4
3:
	REG_L a4, 0(a1)
	REG_L a5, SZREG(a1)
	REG_L a6, 2*SZREG(a1)
	REG_L a7, 3*SZREG(a1)
	REG_L t0, 4*SZREG(a1)
	REG_L t1, 5*SZREG(a1)
	REG_L t2, 6*SZREG(a1)
	REG_L t3, 7*SZREG(a1)
	REG_L t4, 8*SZREG(a1)
	REG_L t5, 9*SZREG(a1)
	REG_S a4, 0(t6)
	REG_S a5, SZREG(t6)
	REG_S a6, 2*SZREG(t6)
	REG_S a7, 3*SZREG(t6)
	REG_S t0, 4*SZREG(t6)
	REG_S t1, 5*SZREG(t6)
	REG_S t2, 6*SZREG(t6)
	REG_S t3, 7*SZREG(t6)
	REG_S t4, 8*SZREG(t6)
	REG_S t5, 9*SZREG(t6)
	REG_L a4, 10*SZREG(a1)
	REG_L a5, 11*SZREG(a1)
	REG_L a6, 12*SZREG(a1)
	REG_L a7, 13*SZREG(a1)
	REG_L t0, 14*SZREG(a1)
	REG_L t1, 15*SZREG(a1)
	addi a1, a1, 16*SZREG
	REG_S a4, 10*SZREG(t6)
	REG_S a5, 11*SZREG(t6)
	REG_S a6, 12*SZREG(t6)
	REG_S a7, 13*SZREG(t6)
	REG_S t0, 14*SZREG(t6)
	REG_S t1, 15*SZREG(t6)
	addi t6, t6, 16*SZREG
	bltu a1, a3, 3b
	andi a2, a2, (16*SZREG)-1  /* Update count */

4:
	/* Handle trailing misalignment */
	beqz a2, 6f
	add a3, a1, a2

	/* Use word-oriented copy if co-aligned to word boundary */
	or a5, a1, t6
	or a5, a5, a3
	andi a5, a5, 3
	bnez a5, 5f
7:
	lw a4, 0(a1)
	addi a1, a1, 4
	sw a4, 0(t6)
	addi t6, t6, 4
	bltu a1, a3, 7b

	ret

5:
	lb a4, 0(a1)
	addi a1, a1, 1
	sb a4, 0(t6)
	addi t6, t6, 1
	bltu a1, a3, 5b
6:
	ret
END(__memcpy)
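
/* Prefixed aliases for the early, KASAN-free boot code (see the commit log above) */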
SYM_FUNC_ALIAS(__pi_memcpy, __memcpy)
SYM_FUNC_ALIAS(__pi___memcpy, __memcpy)