From 6da111574baffb3399a6bd03a98b269eac9713f2 Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Fri, 26 Apr 2024 12:08:21 +0200
Subject: [PATCH 1/6] riscv: Provide a definition for 'pause'

If we're going to provide the encoding for 'pause' in cpu_relax()
anyway, then we can drop the toolchain checks and just always use it.
The advantage of doing this is that other code that needs 'pause'
doesn't also need to define it (yes, another use is coming).

Add the definition to insn-def.h, since it's an instruction definition,
and also because insn-def.h doesn't include much, so it's safe to
include from asm/vdso/processor.h without concern for circular
dependencies.

Signed-off-by: Andrew Jones
Link: https://lore.kernel.org/r/20240426100820.14762-9-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/Kconfig                      | 7 -------
 arch/riscv/Makefile                     | 3 ---
 arch/riscv/include/asm/insn-def.h       | 2 ++
 arch/riscv/include/asm/vdso/processor.h | 8 ++------
 4 files changed, 4 insertions(+), 16 deletions(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index be09c8836d56..7427d8088337 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -639,13 +639,6 @@ config RISCV_ISA_ZICBOZ
 
 	   If you don't know what to do here, say Y.
 
-config TOOLCHAIN_HAS_ZIHINTPAUSE
-	bool
-	default y
-	depends on !64BIT || $(cc-option,-mabi=lp64 -march=rv64ima_zihintpause)
-	depends on !32BIT || $(cc-option,-mabi=ilp32 -march=rv32ima_zihintpause)
-	depends on LLD_VERSION >= 150000 || LD_VERSION >= 23600
-
 config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
 	def_bool y
 	# https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 252d63942f34..f1792ac03335 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -75,9 +75,6 @@ else
 riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
 endif
 
-# Check if the toolchain supports Zihintpause extension
-riscv-march-$(CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE) := $(riscv-march-y)_zihintpause
-
 # Remove F,D,V from isa string for all. Keep extensions between "fd" and "v" by
 # matching non-v and non-multi-letter extensions out with the filter ([^v_]*)
 KBUILD_CFLAGS += -march=$(shell echo $(riscv-march-y) | sed -E 's/(rv32ima|rv64ima)fd([^v_]*)v?/\1\2/')
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index e27179b26086..64dffaa21bfa 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -196,4 +196,6 @@
 	INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
 	       RS1(base), SIMM12(4))
 
+#define RISCV_PAUSE	".4byte 0x100000f"
+
 #endif /* __ASM_INSN_DEF_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
index 96b65a5396df..8f383f05a290 100644
--- a/arch/riscv/include/asm/vdso/processor.h
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -5,6 +5,7 @@
 #ifndef __ASSEMBLY__
 
 #include <asm/barrier.h>
+#include <asm/insn-def.h>
 
 static inline void cpu_relax(void)
 {
@@ -14,16 +15,11 @@ static inline void cpu_relax(void)
 	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
 #endif
 
-#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
 	/*
 	 * Reduce instruction retirement.
	 * This assumes the PC changes.
	 */
-	__asm__ __volatile__ ("pause");
-#else
-	/* Encoding of the pause instruction */
-	__asm__ __volatile__ (".4byte 0x100000F");
-#endif
+	__asm__ __volatile__ (RISCV_PAUSE);
 	barrier();
 }
 
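Note: with RISCV_PAUSE available from insn-def.h, any other kernel code that
wants to emit the pause hint can do so without caring whether the assembler
understands Zihintpause. A minimal sketch, assuming only what this patch adds
(relax_once() is a hypothetical helper used for illustration, not something
introduced by this series):

#include <asm/insn-def.h>

/*
 * Emit the Zihintpause 'pause' hint by its raw encoding.  The encoding sits
 * in the FENCE opcode space, so it is harmless on cores that do not
 * implement the hint.
 */
static inline void relax_once(void)
{
	__asm__ __volatile__ (RISCV_PAUSE : : : "memory");
}
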
From 6d5852811600086f0a227a4d646b2a20b4dfe533 Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Fri, 26 Apr 2024 12:08:22 +0200
Subject: [PATCH 2/6] dt-bindings: riscv: Add Zawrs ISA extension description

Add description for the Zawrs (Wait-on-Reservation-Set) ISA extension
which was ratified in commit 98918c844281 of riscv-isa-manual.

Signed-off-by: Andrew Jones
Acked-by: Conor Dooley
Link: https://lore.kernel.org/r/20240426100820.14762-10-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 Documentation/devicetree/bindings/riscv/extensions.yaml | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/Documentation/devicetree/bindings/riscv/extensions.yaml b/Documentation/devicetree/bindings/riscv/extensions.yaml
index 468c646247aa..8594e7c33ea2 100644
--- a/Documentation/devicetree/bindings/riscv/extensions.yaml
+++ b/Documentation/devicetree/bindings/riscv/extensions.yaml
@@ -177,6 +177,13 @@ properties:
             is supported as ratified at commit 5059e0ca641c ("update to
             ratified") of the riscv-zacas.
 
+        - const: zawrs
+          description: |
+            The Zawrs extension for entering a low-power state or for trapping
+            to a hypervisor while waiting on a store to a memory location, as
+            ratified in commit 98918c844281 ("Merge pull request #1217 from
+            riscv/zawrs") of riscv-isa-manual.
+
         - const: zba
           description: |
             The standard Zba bit-manipulation extension for address generation
From b8ddb0df30f9f6e70422f1e705b7416da115bd24 Mon Sep 17 00:00:00 2001
From: Christoph Müllner
Date: Fri, 26 Apr 2024 12:08:23 +0200
Subject: [PATCH 3/6] riscv: Add Zawrs support for spinlocks
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

RISC-V code uses the generic ticket lock implementation, which calls
the macros smp_cond_load_relaxed() and smp_cond_load_acquire().

Introduce a RISC-V specific implementation of smp_cond_load_relaxed()
which uses WRS.NTO of the Zawrs extension in order to reduce power
consumption while waiting, and which allows hypervisors to let guests
trap while waiting.

smp_cond_load_acquire() doesn't need a RISC-V specific implementation,
as the generic implementation is based on smp_cond_load_relaxed() and
smp_acquire__after_ctrl_dep() sufficiently provides the acquire
semantics.

This implementation is heavily based on Arm's approach, which is also
the approach Andrea Parri suggested.

The Zawrs specification can be found here:
https://github.com/riscv/riscv-zawrs/blob/main/zawrs.adoc

Signed-off-by: Christoph Müllner
Co-developed-by: Andrew Jones
Signed-off-by: Andrew Jones
Link: https://lore.kernel.org/r/20240426100820.14762-11-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/Kconfig                | 13 +++++++
 arch/riscv/include/asm/barrier.h  | 45 ++++++++++++++++--------
 arch/riscv/include/asm/cmpxchg.h  | 58 +++++++++++++++++++++++++++++++
 arch/riscv/include/asm/hwcap.h    |  1 +
 arch/riscv/include/asm/insn-def.h |  2 ++
 arch/riscv/kernel/cpufeature.c    |  1 +
 6 files changed, 105 insertions(+), 15 deletions(-)

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 7427d8088337..34bbe6b70546 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -578,6 +578,19 @@ config RISCV_ISA_V_PREEMPTIVE
 	  preemption. Enabling this config will result in higher memory
 	  consumption due to the allocation of per-task's kernel Vector context.
 
+config RISCV_ISA_ZAWRS
+	bool "Zawrs extension support for more efficient busy waiting"
+	depends on RISCV_ALTERNATIVE
+	default y
+	help
+	  The Zawrs extension defines instructions to be used in polling loops
+	  which allow a hart to enter a low-power state or to trap to the
+	  hypervisor while waiting on a store to a memory location. Enable the
+	  use of these instructions in the kernel when the Zawrs extension is
+	  detected at boot.
+
+	  If you don't know what to do here, say Y.
+
 config TOOLCHAIN_HAS_ZBB
 	bool
 	default y
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 880b56d8480d..e1d9bf1deca6 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,6 +11,7 @@
 #define _ASM_RISCV_BARRIER_H
 
 #ifndef __ASSEMBLY__
+#include <asm/cmpxchg.h>
 #include <asm/fence.h>
 
 #define nop()		__asm__ __volatile__ ("nop")
@@ -28,21 +29,6 @@
 #define __smp_rmb()	RISCV_FENCE(r, r)
 #define __smp_wmb()	RISCV_FENCE(w, w)
 
-#define __smp_store_release(p, v)	\
-do {					\
-	compiletime_assert_atomic_type(*p);	\
-	RISCV_FENCE(rw, w);		\
-	WRITE_ONCE(*p, v);		\
-} while (0)
-
-#define __smp_load_acquire(p)		\
-({					\
-	typeof(*p) ___p1 = READ_ONCE(*p);	\
-	compiletime_assert_atomic_type(*p);	\
-	RISCV_FENCE(r, rw);		\
-	___p1;				\
-})
-
 /*
  * This is a very specific barrier: it's currently only used in two places in
  * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
@@ -70,6 +56,35 @@ do { \
 */
 #define smp_mb__after_spinlock()	RISCV_FENCE(iorw, iorw)
 
+#define __smp_store_release(p, v)	\
+do {					\
+	compiletime_assert_atomic_type(*p);	\
+	RISCV_FENCE(rw, w);		\
+	WRITE_ONCE(*p, v);		\
+} while (0)
+
+#define __smp_load_acquire(p)		\
+({					\
+	typeof(*p) ___p1 = READ_ONCE(*p);	\
+	compiletime_assert_atomic_type(*p);	\
+	RISCV_FENCE(r, rw);		\
+	___p1;				\
+})
+
+#ifdef CONFIG_RISCV_ISA_ZAWRS
+#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
+	typeof(ptr) __PTR = (ptr);			\
+	__unqual_scalar_typeof(*ptr) VAL;		\
+	for (;;) {					\
+		VAL = READ_ONCE(*__PTR);		\
+		if (cond_expr)				\
+			break;				\
+		__cmpwait_relaxed(ptr, VAL);		\
+	}						\
+	(typeof(*ptr))VAL;				\
+})
+#endif
+
 #include <asm-generic/barrier.h>
 
 #endif /* __ASSEMBLY__ */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2fee65cc8443..725276dcb996 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,7 +8,10 @@
 
 #include <linux/bug.h>
 
+#include <asm/alternative-macros.h>
 #include <asm/fence.h>
+#include <asm/hwcap.h>
+#include <asm/insn-def.h>
 
 #define __xchg_relaxed(ptr, new, size)	\
 ({					\
@@ -359,4 +362,59 @@
 	arch_cmpxchg_relaxed((ptr), (o), (n));	\
 })
 
+#ifdef CONFIG_RISCV_ISA_ZAWRS
+/*
+ * Despite wrs.nto being "WRS-with-no-timeout", in the absence of changes to
+ * @val we still expect it to terminate within a "reasonable" amount of time,
+ * whether due to an implementation-specific reason, a pending locally-enabled
+ * interrupt, or because it has been configured to raise an illegal
+ * instruction exception.
+ */
+static __always_inline void __cmpwait(volatile void *ptr,
+				      unsigned long val,
+				      int size)
+{
+	unsigned long tmp;
+
+	asm goto(ALTERNATIVE("j %l[no_zawrs]", "nop",
+			     0, RISCV_ISA_EXT_ZAWRS, 1)
+		 : : : : no_zawrs);
+
+	switch (size) {
+	case 4:
+		asm volatile(
+		"	lr.w	%0, %1\n"
+		"	xor	%0, %0, %2\n"
+		"	bnez	%0, 1f\n"
+			ZAWRS_WRS_NTO "\n"
+		"1:"
+		: "=&r" (tmp), "+A" (*(u32 *)ptr)
+		: "r" (val));
+		break;
+#if __riscv_xlen == 64
+	case 8:
+		asm volatile(
+		"	lr.d	%0, %1\n"
+		"	xor	%0, %0, %2\n"
+		"	bnez	%0, 1f\n"
+			ZAWRS_WRS_NTO "\n"
+		"1:"
+		: "=&r" (tmp), "+A" (*(u64 *)ptr)
+		: "r" (val));
+		break;
+#endif
+	default:
+		BUILD_BUG();
+	}
+
+	return;
+
+no_zawrs:
+	asm volatile(RISCV_PAUSE : : : "memory");
+}
+
+#define __cmpwait_relaxed(ptr, val) \
+	__cmpwait((ptr), (unsigned long)(val), sizeof(*(ptr)))
+#endif
+
 #endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index e17d0078a651..5b358c3cf212 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -81,6 +81,7 @@
 #define RISCV_ISA_EXT_ZTSO		72
 #define RISCV_ISA_EXT_ZACAS		73
 #define RISCV_ISA_EXT_XANDESPMU		74
+#define RISCV_ISA_EXT_ZAWRS		75
 
 #define RISCV_ISA_EXT_XLINUXENVCFG	127
 
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 64dffaa21bfa..9a913010cdd9 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -197,5 +197,7 @@
 	       RS1(base), SIMM12(4))
 
 #define RISCV_PAUSE	".4byte 0x100000f"
+#define ZAWRS_WRS_NTO	".4byte 0x00d00073"
+#define ZAWRS_WRS_STO	".4byte 0x01d00073"
 
 #endif /* __ASM_INSN_DEF_H */
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index 3ed2359eae35..02de9eaa3f42 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -257,6 +257,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
 	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
 	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
 	__RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+	__RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS),
 	__RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
 	__RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
 	__RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
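Note: for context, the generic ticket lock that the commit message refers to
looks roughly like the sketch below (condensed from
include/asm-generic/spinlock.h, not part of this series); the
atomic_cond_read_acquire() at the end is what ultimately expands to the
smp_cond_load_relaxed()/__cmpwait_relaxed() loop added above:

/* Condensed sketch of the generic ticket-lock acquire path. */
static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
	u32 val = atomic_fetch_add(1 << 16, lock);	/* take a ticket */
	u16 ticket = val >> 16;

	if (ticket == (u16)val)		/* "now serving" already equals our ticket */
		return;

	/*
	 * Wait until the owner field equals our ticket.  With Zawrs this can
	 * sleep in wrs.nto between retries instead of spinning.
	 */
	atomic_cond_read_acquire(lock, ticket == (u16)VAL);
	smp_mb();
}
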
From 244c18fbf64a33d152645766a033b2935ab0acb5 Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Fri, 26 Apr 2024 12:08:24 +0200
Subject: [PATCH 4/6] riscv: hwprobe: export Zawrs ISA extension
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Export Zawrs ISA extension through hwprobe.

[Palmer: there's a gap in the numbers here as there will be a merge
conflict when this is picked up. To avoid confusion I just set the
hwprobe ID to match what it would be post-merge.]

Signed-off-by: Andrew Jones
Reviewed-by: Clément Léger
Link: https://lore.kernel.org/r/20240426100820.14762-12-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 Documentation/arch/riscv/hwprobe.rst  | 4 ++++
 arch/riscv/include/uapi/asm/hwprobe.h | 1 +
 arch/riscv/kernel/sys_hwprobe.c       | 1 +
 3 files changed, 6 insertions(+)

diff --git a/Documentation/arch/riscv/hwprobe.rst b/Documentation/arch/riscv/hwprobe.rst
index b2bcc9eed9aa..e072ce8285d8 100644
--- a/Documentation/arch/riscv/hwprobe.rst
+++ b/Documentation/arch/riscv/hwprobe.rst
@@ -188,6 +188,10 @@ The following keys are defined:
       manual starting from commit 95cf1f9 ("Add changes requested by Ved
       during signoff")
 
+  * :c:macro:`RISCV_HWPROBE_EXT_ZAWRS`: The Zawrs extension is supported as
+       ratified in commit 98918c844281 ("Merge pull request #1217 from
+       riscv/zawrs") of riscv-isa-manual.
+
 * :c:macro:`RISCV_HWPROBE_KEY_CPUPERF_0`: A bitmask that contains performance
   information about the selected set of processors.
 
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 9f2a8e3ff204..3b5e0adb1d72 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -59,6 +59,7 @@ struct riscv_hwprobe {
 #define		RISCV_HWPROBE_EXT_ZTSO		(1ULL << 33)
 #define		RISCV_HWPROBE_EXT_ZACAS		(1ULL << 34)
 #define		RISCV_HWPROBE_EXT_ZICOND	(1ULL << 35)
+#define		RISCV_HWPROBE_EXT_ZAWRS		(1ULL << 48)
 #define RISCV_HWPROBE_KEY_CPUPERF_0	5
 #define		RISCV_HWPROBE_MISALIGNED_UNKNOWN	(0 << 0)
 #define		RISCV_HWPROBE_MISALIGNED_EMULATED	(1 << 0)
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
index 8cae41a502dd..b86e3531a45a 100644
--- a/arch/riscv/kernel/sys_hwprobe.c
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -111,6 +111,7 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
 		EXT_KEY(ZTSO);
 		EXT_KEY(ZACAS);
 		EXT_KEY(ZICOND);
+		EXT_KEY(ZAWRS);
 
 	if (has_vector()) {
 		EXT_KEY(ZVBB);
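Note: once built against headers from a kernel with this patch, userspace can
test the new bit with the riscv_hwprobe() syscall. A minimal sketch (error
handling omitted; assumes the libc headers expose __NR_riscv_hwprobe):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

	/* riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags) */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
	    (pair.value & RISCV_HWPROBE_EXT_ZAWRS))
		printf("Zawrs is supported\n");

	return 0;
}
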
From 86d6a86e59e3aa425d829a935c0d92388e4fe55b Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Fri, 26 Apr 2024 12:08:25 +0200
Subject: [PATCH 5/6] KVM: riscv: Support guest wrs.nto

When a guest traps on wrs.nto, call kvm_vcpu_on_spin() to attempt to
yield to the lock-holding VCPU. Also extend the KVM ISA extension
ONE_REG interface to allow KVM userspace to detect and enable the
Zawrs extension for the Guest/VM.

Signed-off-by: Andrew Jones
Acked-by: Anup Patel
Link: https://lore.kernel.org/r/20240426100820.14762-13-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 arch/riscv/include/asm/kvm_host.h |  1 +
 arch/riscv/include/uapi/asm/kvm.h |  1 +
 arch/riscv/kvm/vcpu.c             |  1 +
 arch/riscv/kvm/vcpu_insn.c        | 15 +++++++++++++++
 arch/riscv/kvm/vcpu_onereg.c      |  2 ++
 5 files changed, 20 insertions(+)

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 484d04a92fa6..e27c56e44783 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -69,6 +69,7 @@ struct kvm_vcpu_stat {
 	struct kvm_vcpu_stat_generic generic;
 	u64 ecall_exit_stat;
 	u64 wfi_exit_stat;
+	u64 wrs_exit_stat;
 	u64 mmio_exit_user;
 	u64 mmio_exit_kernel;
 	u64 csr_exit_user;
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index b1c503c2959c..89ea06bd07c2 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -167,6 +167,7 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_ZFA,
 	KVM_RISCV_ISA_EXT_ZTSO,
 	KVM_RISCV_ISA_EXT_ZACAS,
+	KVM_RISCV_ISA_EXT_ZAWRS,
 	KVM_RISCV_ISA_EXT_MAX,
 };
 
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index b5ca9f2e98ac..abcdc78671e0 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -25,6 +25,7 @@ const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
 	KVM_GENERIC_VCPU_STATS(),
 	STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
 	STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
+	STATS_DESC_COUNTER(VCPU, wrs_exit_stat),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_user),
 	STATS_DESC_COUNTER(VCPU, mmio_exit_kernel),
 	STATS_DESC_COUNTER(VCPU, csr_exit_user),
diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c
index ee7215f4071f..97dec18e6989 100644
--- a/arch/riscv/kvm/vcpu_insn.c
+++ b/arch/riscv/kvm/vcpu_insn.c
@@ -16,6 +16,9 @@
 #define INSN_MASK_WFI		0xffffffff
 #define INSN_MATCH_WFI		0x10500073
 
+#define INSN_MASK_WRS		0xffffffff
+#define INSN_MATCH_WRS		0x00d00073
+
 #define INSN_MATCH_CSRRW	0x1073
 #define INSN_MASK_CSRRW		0x707f
 #define INSN_MATCH_CSRRS	0x2073
@@ -203,6 +206,13 @@ static int wfi_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
 	return KVM_INSN_CONTINUE_NEXT_SEPC;
 }
 
+static int wrs_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, ulong insn)
+{
+	vcpu->stat.wrs_exit_stat++;
+	kvm_vcpu_on_spin(vcpu, vcpu->arch.guest_context.sstatus & SR_SPP);
+	return KVM_INSN_CONTINUE_NEXT_SEPC;
+}
+
 struct csr_func {
 	unsigned int base;
 	unsigned int count;
@@ -378,6 +388,11 @@ static const struct insn_func system_opcode_funcs[] = {
 		.match = INSN_MATCH_WFI,
 		.func  = wfi_insn,
 	},
+	{
+		.mask  = INSN_MASK_WRS,
+		.match = INSN_MATCH_WRS,
+		.func  = wrs_insn,
+	},
 };
 
 static int system_opcode_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index f4a6124d25c9..67c5794af3b6 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -41,6 +41,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	KVM_ISA_EXT_ARR(SVNAPOT),
 	KVM_ISA_EXT_ARR(SVPBMT),
 	KVM_ISA_EXT_ARR(ZACAS),
+	KVM_ISA_EXT_ARR(ZAWRS),
 	KVM_ISA_EXT_ARR(ZBA),
 	KVM_ISA_EXT_ARR(ZBB),
 	KVM_ISA_EXT_ARR(ZBC),
@@ -120,6 +121,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_SVINVAL:
 	case KVM_RISCV_ISA_EXT_SVNAPOT:
 	case KVM_RISCV_ISA_EXT_ZACAS:
+	case KVM_RISCV_ISA_EXT_ZAWRS:
 	case KVM_RISCV_ISA_EXT_ZBA:
 	case KVM_RISCV_ISA_EXT_ZBB:
 	case KVM_RISCV_ISA_EXT_ZBC:
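Note: KVM userspace can then turn the extension on for a vCPU through the
ISA-extension ONE_REG interface, the same register the selftest in the next
patch adds to its list. A rough sketch (assumes a 64-bit host and an
already-created vCPU fd; error handling omitted):

#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

static int enable_zawrs(int vcpu_fd)
{
	__u64 enable = 1;
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
			KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE |
			KVM_RISCV_ISA_EXT_ZAWRS,
		.addr = (unsigned long)&enable,
	};

	/* Writing 1 enables the extension for the guest (if host-supported). */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
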
From f2c43c61160ecba15f073b79abf1ea351e41203d Mon Sep 17 00:00:00 2001
From: Andrew Jones
Date: Fri, 26 Apr 2024 12:08:26 +0200
Subject: [PATCH 6/6] KVM: riscv: selftests: Add Zawrs extension to
 get-reg-list test

KVM RISC-V allows the Zawrs extension for the Guest/VM, so add it to
the get-reg-list test.

Signed-off-by: Andrew Jones
Acked-by: Anup Patel
Link: https://lore.kernel.org/r/20240426100820.14762-14-ajones@ventanamicro.com
Signed-off-by: Palmer Dabbelt
---
 tools/testing/selftests/kvm/riscv/get-reg-list.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/tools/testing/selftests/kvm/riscv/get-reg-list.c b/tools/testing/selftests/kvm/riscv/get-reg-list.c
index b882b7b9b785..8c4c27bd4b88 100644
--- a/tools/testing/selftests/kvm/riscv/get-reg-list.c
+++ b/tools/testing/selftests/kvm/riscv/get-reg-list.c
@@ -48,6 +48,7 @@ bool filter_reg(__u64 reg)
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVNAPOT:
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_SVPBMT:
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZACAS:
+	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZAWRS:
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBA:
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB:
 	case KVM_REG_RISCV_ISA_EXT | KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBC:
@@ -413,6 +414,7 @@ static const char *isa_ext_single_id_to_str(__u64 reg_off)
 		KVM_ISA_EXT_ARR(SVNAPOT),
 		KVM_ISA_EXT_ARR(SVPBMT),
 		KVM_ISA_EXT_ARR(ZACAS),
+		KVM_ISA_EXT_ARR(ZAWRS),
 		KVM_ISA_EXT_ARR(ZBA),
 		KVM_ISA_EXT_ARR(ZBB),
 		KVM_ISA_EXT_ARR(ZBC),
@@ -936,6 +938,7 @@ KVM_ISA_EXT_SIMPLE_CONFIG(svinval, SVINVAL);
 KVM_ISA_EXT_SIMPLE_CONFIG(svnapot, SVNAPOT);
 KVM_ISA_EXT_SIMPLE_CONFIG(svpbmt, SVPBMT);
 KVM_ISA_EXT_SIMPLE_CONFIG(zacas, ZACAS);
+KVM_ISA_EXT_SIMPLE_CONFIG(zawrs, ZAWRS);
 KVM_ISA_EXT_SIMPLE_CONFIG(zba, ZBA);
 KVM_ISA_EXT_SIMPLE_CONFIG(zbb, ZBB);
 KVM_ISA_EXT_SIMPLE_CONFIG(zbc, ZBC);
@@ -991,6 +994,7 @@ struct vcpu_reg_list *vcpu_configs[] = {
 	&config_svnapot,
 	&config_svpbmt,
 	&config_zacas,
+	&config_zawrs,
 	&config_zba,
 	&config_zbb,
 	&config_zbc,