linux/tools/testing/selftests/arm64/abi/syscall-abi.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 ARM Limited.
 */

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/auxv.h>
#include <sys/prctl.h>
#include <asm/hwcap.h>
#include <asm/sigcontext.h>
#include <asm/unistd.h>

#include "../../kselftest.h"
#include "syscall-abi.h"

static int default_sme_vl;

static int sve_vl_count;
static unsigned int sve_vls[SVE_VQ_MAX];
static int sme_vl_count;
static unsigned int sme_vls[SVE_VQ_MAX];

/*
 * do_syscall() is implemented in assembly (syscall-abi-asm.S): it
 * loads the *_in buffers below into the registers, makes the syscall
 * whose number setup_gpr() places in x8, then stores the resulting
 * register state to the *_out buffers for the check functions.
 */
extern void do_syscall(int sve_vl, int sme_vl);

static void fill_random(void *buf, size_t size)
{
	int i;
	uint32_t *lbuf = buf;

	/*
	 * Fill as 32 bit words: random() only provides 31 bits of
	 * randomness per call regardless of the size of long.
	 */
	for (i = 0; i < size / sizeof(uint32_t); i++)
		lbuf[i] = random();
}

/*
 * We repeat the test for several syscalls to try to expose different
 * behaviour.
 */
static struct syscall_cfg {
	int syscall_nr;
	const char *name;
} syscalls[] = {
	{ __NR_getpid,		"getpid()" },
	{ __NR_sched_yield,	"sched_yield()" },
};
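
/*
 * Both are cheap, argument-free syscalls; sched_yield() may also
 * trigger a reschedule and therefore a context switch.
 */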

#define NUM_GPR 31
uint64_t gpr_in[NUM_GPR];
uint64_t gpr_out[NUM_GPR];

static void setup_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		      uint64_t svcr)
{
	fill_random(gpr_in, sizeof(gpr_in));
	gpr_in[8] = cfg->syscall_nr;	/* x8 carries the syscall number */
	memset(gpr_out, 0, sizeof(gpr_out));
}

static int check_gpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl, uint64_t svcr)
{
	int errors = 0;
	int i;

	/*
	 * GPR x0-x7 may be clobbered and x8 holds the syscall number;
	 * all others should be preserved.
	 */
	for (i = 9; i < ARRAY_SIZE(gpr_in); i++) {
		if (gpr_in[i] != gpr_out[i]) {
			ksft_print_msg("%s SVE VL %d mismatch in GPR %d: %llx != %llx\n",
				       cfg->name, sve_vl, i,
				       gpr_in[i], gpr_out[i]);
			errors++;
		}
	}

	return errors;
}

#define NUM_FPR 32
uint64_t fpr_in[NUM_FPR * 2];	/* 128 bit V0-V31, two 64 bit words each */
uint64_t fpr_out[NUM_FPR * 2];
uint64_t fpr_zero[NUM_FPR * 2];

static void setup_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		      uint64_t svcr)
{
	fill_random(fpr_in, sizeof(fpr_in));
	memset(fpr_out, 0, sizeof(fpr_out));
}

static int check_fpr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		     uint64_t svcr)
{
	int errors = 0;
	int i;

	/*
	 * Only check the V registers directly when no SVE VL is set;
	 * with SVE active they alias the low bits of the Z registers,
	 * which check_z() covers.
	 */
	if (!sve_vl && !(svcr & SVCR_SM_MASK)) {
		for (i = 0; i < ARRAY_SIZE(fpr_in); i++) {
			if (fpr_in[i] != fpr_out[i]) {
				ksft_print_msg("%s Q%d/%d mismatch %llx != %llx\n",
					       cfg->name,
					       i / 2, i % 2,
					       fpr_in[i], fpr_out[i]);
				errors++;
			}
		}
	}

	/*
	 * In streaming mode the whole register set should be cleared
	 * by the transition out of streaming mode.
	 */
	if (svcr & SVCR_SM_MASK) {
		if (memcmp(fpr_zero, fpr_out, sizeof(fpr_out)) != 0) {
			ksft_print_msg("%s FPSIMD registers non-zero exiting SM\n",
				       cfg->name);
			errors++;
		}
	}

	return errors;
}

/* The low 128 bits of each Z register are shared with the V registers */
#define SVE_Z_SHARED_BYTES (128 / 8)

static uint8_t z_zero[__SVE_ZREG_SIZE(SVE_VQ_MAX)];
uint8_t z_in[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];
uint8_t z_out[SVE_NUM_ZREGS * __SVE_ZREG_SIZE(SVE_VQ_MAX)];

static void setup_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	fill_random(z_in, sizeof(z_in));
	fill_random(z_out, sizeof(z_out));
}

static int check_z(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		   uint64_t svcr)
{
	size_t reg_size = sve_vl;
	int errors = 0;
	int i;

	if (!sve_vl)
		return 0;

	for (i = 0; i < SVE_NUM_ZREGS; i++) {
		uint8_t *in = &z_in[reg_size * i];
		uint8_t *out = &z_out[reg_size * i];

		if (svcr & SVCR_SM_MASK) {
			/*
			 * In streaming mode the whole register should
			 * be cleared by the transition out of
			 * streaming mode.
			 */
			if (memcmp(z_zero, out, reg_size) != 0) {
				ksft_print_msg("%s SVE VL %d Z%d non-zero\n",
					       cfg->name, sve_vl, i);
				errors++;
			}
		} else {
			/*
			 * For standard SVE the low 128 bits should be
			 * preserved and any additional bits cleared.
			 */
			if (memcmp(in, out, SVE_Z_SHARED_BYTES) != 0) {
				ksft_print_msg("%s SVE VL %d Z%d low 128 bits changed\n",
					       cfg->name, sve_vl, i);
				errors++;
			}

			if (reg_size > SVE_Z_SHARED_BYTES &&
			    (memcmp(z_zero, out + SVE_Z_SHARED_BYTES,
				    reg_size - SVE_Z_SHARED_BYTES) != 0)) {
				ksft_print_msg("%s SVE VL %d Z%d high bits non-zero\n",
					       cfg->name, sve_vl, i);
				errors++;
			}
		}
	}

	return errors;
}

uint8_t p_in[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];
uint8_t p_out[SVE_NUM_PREGS * __SVE_PREG_SIZE(SVE_VQ_MAX)];

static void setup_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	fill_random(p_in, sizeof(p_in));
	fill_random(p_out, sizeof(p_out));
}

static int check_p(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		   uint64_t svcr)
{
	size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
	int errors = 0;
	int i;

	if (!sve_vl)
		return 0;

	/* After a syscall the P registers should be zeroed */
	for (i = 0; i < SVE_NUM_PREGS * reg_size; i++)
		if (p_out[i])
			errors++;
	if (errors)
		ksft_print_msg("%s SVE VL %d predicate registers non-zero\n",
			       cfg->name, sve_vl);

	return errors;
}

uint8_t ffr_in[__SVE_PREG_SIZE(SVE_VQ_MAX)];
uint8_t ffr_out[__SVE_PREG_SIZE(SVE_VQ_MAX)];

static void setup_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		      uint64_t svcr)
{
	/*
	 * If we are in streaming mode and do not have FA64 then FFR
	 * is unavailable.
	 */
	if ((svcr & SVCR_SM_MASK) &&
	    !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)) {
		memset(&ffr_in, 0, sizeof(ffr_in));
		return;
	}

	/*
	 * It is only valid to set a contiguous set of bits starting
	 * at 0.  For now, since we're expecting this to be cleared by
	 * a syscall, just set all bits.
	 */
	memset(ffr_in, 0xff, sizeof(ffr_in));
	fill_random(ffr_out, sizeof(ffr_out));
}

static int check_ffr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		     uint64_t svcr)
{
	size_t reg_size = sve_vq_from_vl(sve_vl) * 2; /* 1 bit per VL byte */
	int errors = 0;
	int i;

	if (!sve_vl)
		return 0;

	if ((svcr & SVCR_SM_MASK) &&
	    !(getauxval(AT_HWCAP2) & HWCAP2_SME_FA64))
		return 0;

	/* After a syscall FFR should be zeroed */
	for (i = 0; i < reg_size; i++)
		if (ffr_out[i])
			errors++;
	if (errors)
		ksft_print_msg("%s SVE VL %d FFR non-zero\n",
			       cfg->name, sve_vl);

	return errors;
}
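
/*
 * SVCR holds PSTATE.SM and PSTATE.ZA.  The syscall ABI requires that
 * a syscall exits streaming mode but leaves PSTATE.ZA, and therefore
 * the ZA and ZT contents, untouched.
 */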

uint64_t svcr_in, svcr_out;

static void setup_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		       uint64_t svcr)
{
	svcr_in = svcr;
}

static int check_svcr(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		      uint64_t svcr)
{
	int errors = 0;

	if (svcr_out & SVCR_SM_MASK) {
		ksft_print_msg("%s Still in SM, SVCR %llx\n",
			       cfg->name, svcr_out);
		errors++;
	}

	if ((svcr_in & SVCR_ZA_MASK) != (svcr_out & SVCR_ZA_MASK)) {
		ksft_print_msg("%s PSTATE.ZA changed, SVCR %llx != %llx\n",
			       cfg->name, svcr_in, svcr_out);
		errors++;
	}

	return errors;
}

uint8_t za_in[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];
uint8_t za_out[ZA_SIG_REGS_SIZE(SVE_VQ_MAX)];

static void setup_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		     uint64_t svcr)
{
	fill_random(za_in, sizeof(za_in));
	memset(za_out, 0, sizeof(za_out));
}

static int check_za(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	size_t reg_size = sme_vl * sme_vl;	/* ZA is a VL x VL byte matrix */
	int errors = 0;

	if (!(svcr & SVCR_ZA_MASK))
		return 0;

	/* ZA contents should be preserved across the syscall */
	if (memcmp(za_in, za_out, reg_size) != 0) {
		ksft_print_msg("SME VL %d ZA does not match\n", sme_vl);
		errors++;
	}

	return errors;
}

uint8_t zt_in[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));
uint8_t zt_out[ZT_SIG_REG_BYTES] __attribute__((aligned(16)));

static void setup_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		     uint64_t svcr)
{
	fill_random(zt_in, sizeof(zt_in));
	memset(zt_out, 0, sizeof(zt_out));
}

static int check_zt(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	int errors = 0;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME2))
		return 0;

	if (!(svcr & SVCR_ZA_MASK))
		return 0;

	/* Like ZA, ZT0 is preserved while PSTATE.ZA is set */
	if (memcmp(zt_in, zt_out, sizeof(zt_in)) != 0) {
		ksft_print_msg("SME VL %d ZT does not match\n", sme_vl);
		errors++;
	}

	return errors;
}

typedef void (*setup_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
			 uint64_t svcr);
typedef int (*check_fn)(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
			uint64_t svcr);

/*
 * Each set of registers has a setup function which is called before
 * the syscall to fill values in a global variable for loading by the
 * test code, and a check function which validates that the results
 * are as expected.  Vector lengths are passed everywhere; a vector
 * length of 0 should be treated as "do not test".
 */
static struct {
	setup_fn setup;
	check_fn check;
} regset[] = {
	{ setup_gpr, check_gpr },
	{ setup_fpr, check_fpr },
	{ setup_z, check_z },
	{ setup_p, check_p },
	{ setup_ffr, check_ffr },
	{ setup_svcr, check_svcr },
	{ setup_za, check_za },
	{ setup_zt, check_zt },
};
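
/*
 * A new register set can be covered by adding a setup()/check() pair
 * and listing it in regset[] above.  A minimal sketch with
 * hypothetical names, assuming do_syscall() were also taught to save
 * the new state into foo_out:
 *
 *	static void setup_foo(struct syscall_cfg *cfg, int sve_vl,
 *			      int sme_vl, uint64_t svcr)
 *	{
 *		fill_random(foo_in, sizeof(foo_in));
 *		memset(foo_out, 0, sizeof(foo_out));
 *	}
 *
 *	static int check_foo(struct syscall_cfg *cfg, int sve_vl,
 *			     int sme_vl, uint64_t svcr)
 *	{
 *		return memcmp(foo_in, foo_out, sizeof(foo_in)) != 0;
 *	}
 */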

static bool do_test(struct syscall_cfg *cfg, int sve_vl, int sme_vl,
		    uint64_t svcr)
{
	int errors = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(regset); i++)
		regset[i].setup(cfg, sve_vl, sme_vl, svcr);

	do_syscall(sve_vl, sme_vl);

	for (i = 0; i < ARRAY_SIZE(regset); i++)
		errors += regset[i].check(cfg, sve_vl, sme_vl, svcr);

	return errors == 0;
}
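
/*
 * Each syscall is exercised over the full configuration matrix:
 * FPSIMD only, each SVE VL alone, each SVE VL with each SME VL in
 * the SM+ZA, SM and ZA modes, and each SME VL alone in those three
 * modes.
 */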

static void test_one_syscall(struct syscall_cfg *cfg)
{
	int sve, sme;
	int ret;

	/* FPSIMD only case */
	ksft_test_result(do_test(cfg, 0, default_sme_vl, 0),
			 "%s FPSIMD\n", cfg->name);

	for (sve = 0; sve < sve_vl_count; sve++) {
		ret = prctl(PR_SVE_SET_VL, sve_vls[sve]);
		if (ret == -1)
			ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		ksft_test_result(do_test(cfg, sve_vls[sve], default_sme_vl, 0),
				 "%s SVE VL %d\n", cfg->name, sve_vls[sve]);

		for (sme = 0; sme < sme_vl_count; sme++) {
			ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
			if (ret == -1)
				ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
						   strerror(errno), errno);

			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme],
						 SVCR_ZA_MASK | SVCR_SM_MASK),
					 "%s SVE VL %d/SME VL %d SM+ZA\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme], SVCR_SM_MASK),
					 "%s SVE VL %d/SME VL %d SM\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
			ksft_test_result(do_test(cfg, sve_vls[sve],
						 sme_vls[sme], SVCR_ZA_MASK),
					 "%s SVE VL %d/SME VL %d ZA\n",
					 cfg->name, sve_vls[sve],
					 sme_vls[sme]);
		}
	}

	for (sme = 0; sme < sme_vl_count; sme++) {
		ret = prctl(PR_SME_SET_VL, sme_vls[sme]);
		if (ret == -1)
			ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		ksft_test_result(do_test(cfg, 0, sme_vls[sme],
					 SVCR_ZA_MASK | SVCR_SM_MASK),
				 "%s SME VL %d SM+ZA\n",
				 cfg->name, sme_vls[sme]);
		ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_SM_MASK),
				 "%s SME VL %d SM\n",
				 cfg->name, sme_vls[sme]);
		ksft_test_result(do_test(cfg, 0, sme_vls[sme], SVCR_ZA_MASK),
				 "%s SME VL %d ZA\n",
				 cfg->name, sme_vls[sme]);
	}
}

static void sve_count_vls(void)
{
	unsigned int vq;
	int vl;

	if (!(getauxval(AT_HWCAP) & HWCAP_SVE))
		return;

	/*
	 * Enumerate up to SVE_VQ_MAX vector lengths
	 */
	for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
		vl = prctl(PR_SVE_SET_VL, vq * 16);
		if (vl == -1)
			ksft_exit_fail_msg("PR_SVE_SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		vl &= PR_SVE_VL_LEN_MASK;

		/* Skip missing VLs: the kernel rounds to a supported VL */
		if (vq != sve_vq_from_vl(vl))
			vq = sve_vq_from_vl(vl);

		sve_vls[sve_vl_count++] = vl;
	}
}

static void sme_count_vls(void)
{
	unsigned int vq;
	int vl;

	if (!(getauxval(AT_HWCAP2) & HWCAP2_SME))
		return;

	/*
	 * Enumerate up to SVE_VQ_MAX vector lengths
	 */
	for (vq = SVE_VQ_MAX; vq > 0; vq /= 2) {
		vl = prctl(PR_SME_SET_VL, vq * 16);
		if (vl == -1)
			ksft_exit_fail_msg("PR_SME_SET_VL failed: %s (%d)\n",
					   strerror(errno), errno);

		vl &= PR_SME_VL_LEN_MASK;

		/* Found lowest VL */
		if (sve_vq_from_vl(vl) > vq)
			break;

		if (vq != sve_vq_from_vl(vl))
			vq = sve_vq_from_vl(vl);

		sme_vls[sme_vl_count++] = vl;
	}

	/* Ensure we configure a SME VL, used to flag if SVCR is set */
	default_sme_vl = sme_vls[0];
}
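
/*
 * Output follows the usual kselftest conventions: the plan is the
 * number of configurations times the number of syscalls, and one
 * result is reported per do_test() invocation via the ksft_*()
 * helpers.
 */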

int main(void)
{
	int i;
	int tests = 1;	/* FPSIMD */
	int sme_ver;

	srandom(getpid());

	ksft_print_header();

	sve_count_vls();
	sme_count_vls();

	tests += sve_vl_count;				/* SVE only */
	tests += sme_vl_count * 3;			/* SM+ZA, SM and ZA */
	tests += (sve_vl_count * sme_vl_count) * 3;	/* SVE with SME */

	ksft_set_plan(ARRAY_SIZE(syscalls) * tests);

	if (getauxval(AT_HWCAP2) & HWCAP2_SME2)
		sme_ver = 2;
	else
		sme_ver = 1;

	if (getauxval(AT_HWCAP2) & HWCAP2_SME_FA64)
		ksft_print_msg("SME%d with FA64\n", sme_ver);
	else if (getauxval(AT_HWCAP2) & HWCAP2_SME)
		ksft_print_msg("SME%d without FA64\n", sme_ver);

	for (i = 0; i < ARRAY_SIZE(syscalls); i++)
		test_one_syscall(&syscalls[i]);

	ksft_print_cnts();

	return 0;
}