# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#

OBJCOPYFLAGS := -O binary
LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
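# Linker relaxation can rewrite the fixed-size call sequences that dynamic
# ftrace expects to patch at run time, so keep it disabled here.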
LDFLAGS_vmlinux := --no-relax
KBUILD_CPPFLAGS += -DCC_USING_PATCHABLE_FUNCTION_ENTRY
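
# -fpatchable-function-entry reserves a run of nops at each function entry
# which dynamic ftrace rewrites into an 8-byte auipc/jalr detour. With the
# compressed (C) extension a nop is 2 bytes, so four slots are reserved;
# without it a nop is 4 bytes, so two slots are enough.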
ifeq ($(CONFIG_RISCV_ISA_C),y)
CC_FLAGS_FTRACE := -fpatchable-function-entry=4
else
CC_FLAGS_FTRACE := -fpatchable-function-entry=2
endif
endif
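
# Modules may be loaded at addresses outside the zero-anchored 2 GiB window
# that the medlow code model can address, so they are always built with the
# PC-relative medany model.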
ifeq ($(CONFIG_CMODEL_MEDLOW),y)
KBUILD_CFLAGS_MODULE += -mcmodel=medany
endif

export BITS
ifeq ($(CONFIG_ARCH_RV64I),y)
	BITS := 64
	UTS_MACHINE := riscv64

	KBUILD_CFLAGS += -mabi=lp64
	KBUILD_AFLAGS += -mabi=lp64

	KBUILD_LDFLAGS += -melf64lriscv
else
	BITS := 32
	UTS_MACHINE := riscv32

	KBUILD_CFLAGS += -mabi=ilp32
	KBUILD_AFLAGS += -mabi=ilp32
	KBUILD_LDFLAGS += -melf32lriscv
endif
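
# LLD releases before 15 do not support RISC-V linker relaxation, so stop the
# compiler and assembler from emitting relaxable relocations when linking with
# an older LLD.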
ifeq ($(CONFIG_LD_IS_LLD),y)
ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y)
	KBUILD_CFLAGS += -mno-relax
	KBUILD_AFLAGS += -mno-relax
ifndef CONFIG_AS_IS_LLVM
	KBUILD_CFLAGS += -Wa,-mno-relax
	KBUILD_AFLAGS += -Wa,-mno-relax
endif
endif
endif

# ISA string setting
riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c

# Newer binutils versions default to ISA spec version 20191213 which moves some
# instructions from the I extension to the Zicsr and Zifencei extensions.
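# Probe the toolchain: if it accepts the longer -march string, append
# _zicsr_zifencei so the CSR access and fence.i instructions keep assembling.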
toolchain-need-zicsr-zifencei := $(call cc-option-yn, -march=$(riscv-march-y)_zicsr_zifencei)
riscv-march-$(toolchain-need-zicsr-zifencei) := $(riscv-march-y)_zicsr_zifencei

# Check if the toolchain supports Zihintpause extension
riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
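
# The kernel itself must not use floating-point instructions, so drop the F
# and D extensions from the compiler's -march; the assembler keeps the full
# string so that the explicit FPU save/restore code still assembles.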
KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
KBUILD_AFLAGS += -march=$(riscv-march-y)
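
# Don't let the compiler call the libgcc save/restore helper routines in
# function prologues and epilogues.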
KBUILD_CFLAGS += -mno-save-restore
KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)

ifeq ($(CONFIG_CMODEL_MEDLOW),y)
	KBUILD_CFLAGS += -mcmodel=medlow
endif
ifeq ($(CONFIG_CMODEL_MEDANY),y)
	KBUILD_CFLAGS += -mcmodel=medany
endif
ifeq ($(CONFIG_PERF_EVENTS),y)
	KBUILD_CFLAGS += -fno-omit-frame-pointer
endif

# Avoid generating .eh_frame sections.
KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables

KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)

# GCC versions that support the "-mstrict-align" option default to allowing
# unaligned accesses. While unaligned accesses are explicitly allowed in the
# RISC-V ISA, they're emulated by machine mode traps on all extant
# architectures. It's faster to have GCC emit only aligned accesses.
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
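
# Per-task stack canaries: tp holds the current task_struct pointer, so point
# the compiler's stack-protector guard at tp plus the offset of stack_canary,
# extracted from the generated asm-offsets.h once prepare0 has run.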
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
stack_protector_prepare: prepare0
	$(eval KBUILD_CFLAGS += -mstack-protector-guard=tls \
				-mstack-protector-guard-reg=tp \
				-mstack-protector-guard-offset=$(shell \
			awk '{if ($$2 == "TSK_STACK_CANARY") print $$3;}' \
					include/generated/asm-offsets.h))
endif

# arch specific predefines for sparse
CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)

# Default target when executing plain make
boot := arch/riscv/boot
ifeq ($(CONFIG_XIP_KERNEL),y)
KBUILD_IMAGE := $(boot)/xipImage
else
KBUILD_IMAGE := $(boot)/Image.gz
endif

libs-y += arch/riscv/lib/
libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a

PHONY += vdso_install
vdso_install:
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
	$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
		$(build)=arch/riscv/kernel/compat_vdso compat_$@)

ifeq ($(KBUILD_EXTMOD),)
ifeq ($(CONFIG_MMU),y)
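# Generate vdso-offsets.h from the vDSO before the main build, since kernel
# code refers to the vDSO symbol offsets it provides; do the same for the
# compat vDSO when COMPAT is enabled.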
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso include/generated/vdso-offsets.h
	$(if $(CONFIG_COMPAT),$(Q)$(MAKE) \
		$(build)=arch/riscv/kernel/compat_vdso include/generated/compat_vdso-offsets.h)

endif
endif
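
# Pick the default boot image: the raw loader for M-mode Canaan boards, the
# EFI zboot image when EFI_ZBOOT is enabled, otherwise Image.gz (XIP kernels
# keep the xipImage selected above).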
ifneq ($(CONFIG_XIP_KERNEL),y)
ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_ARCH_CANAAN),yy)
KBUILD_IMAGE := $(boot)/loader.bin
else
ifeq ($(CONFIG_EFI_ZBOOT),)
KBUILD_IMAGE := $(boot)/Image.gz
else
KBUILD_IMAGE := $(boot)/vmlinuz.efi
endif
endif
endif
BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi

all: $(notdir $(KBUILD_IMAGE))

$(BOOT_TARGETS): vmlinux
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
	@$(kecho) '  Kernel: $(boot)/$@ is ready'

Image.%: Image
	$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@

install: KBUILD_IMAGE := $(boot)/Image
zinstall: KBUILD_IMAGE := $(boot)/Image.gz
install zinstall:
$(call cmd,install)
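
# Convenience configuration targets, e.g. "make ARCH=riscv rv64_randconfig"
# seeds randconfig/defconfig with the matching 32-bit or 64-bit fragment.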
PHONY += rv32_randconfig
rv32_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/32-bit.config \
		-f $(srctree)/Makefile randconfig

PHONY += rv64_randconfig
rv64_randconfig:
	$(Q)$(MAKE) KCONFIG_ALLCONFIG=$(srctree)/arch/riscv/configs/64-bit.config \
		-f $(srctree)/Makefile randconfig

PHONY += rv32_defconfig
rv32_defconfig:
	$(Q)$(MAKE) -f $(srctree)/Makefile defconfig 32-bit.config