linux-user/aarch64: Move sve record checks into restore

Move the checks out of the parsing loop and into the
restore function.  This more closely mirrors the code
structure in the kernel, and is slightly clearer.

Reject rather than silently skip incorrect VL and SVE record sizes,
bringing our checks into line with those the kernel does.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20220708151540.18136-40-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
This commit is contained in:
Richard Henderson 2022-07-08 20:45:34 +05:30 committed by Peter Maydell
parent 8e5e19ee41
commit d3b4f7170f

View File

@ -250,12 +250,36 @@ static void target_restore_fpsimd_record(CPUARMState *env,
}
}
static void target_restore_sve_record(CPUARMState *env,
struct target_sve_context *sve, int vq)
static bool target_restore_sve_record(CPUARMState *env,
struct target_sve_context *sve,
int size)
{
int i, j;
int i, j, vl, vq;
/* Note that SVE regs are stored as a byte stream, with each byte element
if (!cpu_isar_feature(aa64_sve, env_archcpu(env))) {
return false;
}
__get_user(vl, &sve->vl);
vq = sve_vq(env);
/* Reject mismatched VL. */
if (vl != vq * TARGET_SVE_VQ_BYTES) {
return false;
}
/* Accept empty record -- used to clear PSTATE.SM. */
if (size <= sizeof(*sve)) {
return true;
}
/* Reject non-empty but incomplete record. */
if (size < TARGET_SVE_SIG_CONTEXT_SIZE(vq)) {
return false;
}
/*
* Note that SVE regs are stored as a byte stream, with each byte element
* at a subsequent address. This corresponds to a little-endian load
* of our 64-bit hunks.
*/
@ -277,6 +301,7 @@ static void target_restore_sve_record(CPUARMState *env,
}
}
}
return true;
}
static int target_restore_sigframe(CPUARMState *env,
@ -287,7 +312,7 @@ static int target_restore_sigframe(CPUARMState *env,
struct target_sve_context *sve = NULL;
uint64_t extra_datap = 0;
bool used_extra = false;
int vq = 0, sve_size = 0;
int sve_size = 0;
target_restore_general_frame(env, sf);
@ -321,15 +346,9 @@ static int target_restore_sigframe(CPUARMState *env,
if (sve || size < sizeof(struct target_sve_context)) {
goto err;
}
if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
vq = sve_vq(env);
sve_size = QEMU_ALIGN_UP(TARGET_SVE_SIG_CONTEXT_SIZE(vq), 16);
if (size == sve_size) {
sve = (struct target_sve_context *)ctx;
break;
}
}
goto err;
sve = (struct target_sve_context *)ctx;
sve_size = size;
break;
case TARGET_EXTRA_MAGIC:
if (extra || size != sizeof(struct target_extra_context)) {
@ -362,8 +381,8 @@ static int target_restore_sigframe(CPUARMState *env,
}
/* SVE data, if present, overwrites FPSIMD data. */
if (sve) {
target_restore_sve_record(env, sve, vq);
if (sve && !target_restore_sve_record(env, sve, sve_size)) {
goto err;
}
unlock_user(extra, extra_datap, 0);
return 0;