# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#

LDFLAGS_vmlinux := -z norelro
ifeq ($(CONFIG_RELOCATABLE),y)
	LDFLAGS_vmlinux += -shared -Bsymbolic -z notext --emit-relocs
	KBUILD_CFLAGS += -fPIE
endif
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
	# Linker relaxation would break the fixed-size ftrace call sites.
	LDFLAGS_vmlinux += --no-relax
	KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
ifeq ($(CONFIG_RISCV_ISA_C),y)
	# With compressed instructions the nop is 2 bytes, so 4 entries = 8 bytes.
	CC_FLAGS_FTRACE := -fpatchable-function-entry=4
else
	# Without RVC the nop is 4 bytes, so 2 entries = the same 8-byte detour.
	CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
endif

ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif

export BITS
ifeq ($(CONFIG_ARCH_RV64I),y)
	BITS := 64
	UTS_MACHINE := riscv64

	KBUILD_CFLAGS += -mabi=lp64
	KBUILD_AFLAGS += -mabi=lp64

	KBUILD_LDFLAGS += -melf64lriscv
else
	BITS := 32
	UTS_MACHINE := riscv32

	KBUILD_CFLAGS += -mabi=ilp32
	KBUILD_AFLAGS += -mabi=ilp32
	KBUILD_LDFLAGS += -melf32lriscv
endif

ifndef CONFIG_RISCV_USE_LINKER_RELAXATION
	KBUILD_CFLAGS += -mno-relax
	KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
	KBUILD_CFLAGS += -Wa,-mno-relax
	KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif

ifeq ($(CONFIG_SHADOW_CALL_STACK),y)
	KBUILD_LDFLAGS += --no-relax-gp
endif

# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I)	:= rv32ima
riscv-march-$(CONFIG_ARCH_RV64I)	:= rv64ima
riscv-march-$(CONFIG_FPU)		:= $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C)	:= $(riscv-march-y)c
riscv-march-$(CONFIG_RISCV_ISA_V)	:= $(riscv-march-y)v

ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
KBUILD_CFLAGS += -Wa,-misa-spec=2.2
KBUILD_AFLAGS += -Wa,-misa-spec=2.2
else
riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
endif

# Check if the toolchain supports Zihintpause extension
riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause

# Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by
# matching non-v and non-multi-letter extensions out with the filter ([^v_]*)
KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/')

KBUILD_AFLAGS += -march=$(riscv-march-y)

KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)

ifeq ($(CONFIG_CMODEL_MEDLOW),y)
	KBUILD_CFLAGS += -mcmodel=medlow
endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
	KBUILD_CFLAGS += -mcmodel=medany
endif

# Avoid generating .eh_frame sections.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables

# The RISC-V attributes frequently cause compatibility issues and provide no
# information, so just turn them off.
KBUILD_CFLAGS += $(call cc-option,-mno-riscv-attribute)
KBUILD_AFLAGS += $(call cc-option,-mno-riscv-attribute)
KBUILD_CFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)
KBUILD_AFLAGS += $(call as-option,-Wa$(comma)-mno-arch-attr)

KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)

# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the
# RISC-V ISA, they're emulated by machine mode traps on all extant
# architectures. It's faster to have GCC emit only aligned accesses.
ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
endif

ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
				-mstack-protector-guard-reg=tp \
				-mstack-protector-guard-offset=$(shell \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
				include/generated/asm-offsets.h))
endif

# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)

# Default target when executing plain make
boot		:= arch/riscv/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := $(boot)/xipImage
else
KBUILD_IMAGE	:= $(boot)/Image.gz
endif

libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
	$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
		$(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h)

endif
endif

vdso-install-y			+= arch/riscv/kernel/vdso/vdso.so.dbg
vdso-install-$(CONFIG_COMPAT)	+= arch/riscv/kernel/compat_vdso/compat_vdso.so.dbg:../compat_vdso/compat_vdso.so

ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE := $(boot)/Image.gz
else
KBUILD_IMAGE := $(boot)/vmlinuz.efi
endif
endif
endif
BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi

all:	$(notdir $(KBUILD_IMAGE))

loader.bin: loader
Image.gz loader vmlinuz.efi: Image
$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
	@$(kecho) '  Kernel: $(boot)/$@ is ready'

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

install: KBUILD_IMAGE := $(boot)/Image
zinstall: KBUILD_IMAGE := $(boot)/Image.gz
install zinstall:
	$(call cmd,install)

PHONY += rv32_randconfig
rv32_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/32-bit.config \
		-f $(srctree)/Makefile randconfig

PHONY += rv64_randconfig
rv64_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
		-f $(srctree)/Makefile randconfig

PHONY += rv32_defconfig
rv32_defconfig:
	$(Q)$(MAKE) -f $(srctree)/Makefile defconfig 32-bit.config

PHONY += rv32_nommu_virt_defconfig
rv32_nommu_virt_defconfig:
	$(Q)$(MAKE) -f $(srctree)/Makefile nommu_virt_defconfig 32-bit.config