/*
 * ARM translation
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "internals.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/log.h"
#include "qemu/bitops.h"
#include "arm_ldst.h"
#include "hw/semihosting/semihost.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    dc_isar_feature(aa32_jazelle, s)
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

#include "translate.h"

#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These are TCG temporaries used only by the legacy iwMMXt decoder */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
/* These are TCG globals which alias CPUARMState fields */
static TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;

#include "exec/gen-icount.h"

static const char * const regnames[] =
    { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
      "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };

/* Function prototypes for gen_ functions calling Neon helpers. */
typedef void NeonGenThreeOpEnvFn(TCGv_i32, TCGv_env, TCGv_i32,
                                 TCGv_i32, TCGv_i32);
/* Function prototypes for gen_ functions for fix point conversions */
typedef void VFPGenFixPointFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);

/* initialize TCG globals. */
void arm_translate_init(void)
{
    int i;

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUARMState, regs[i]),
                                          regnames[i]);
    }
    cpu_CF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, CF), "CF");
    cpu_NF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, NF), "NF");
    cpu_VF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, VF), "VF");
    cpu_ZF = tcg_global_mem_new_i32(cpu_env, offsetof(CPUARMState, ZF), "ZF");

    cpu_exclusive_addr = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
    cpu_exclusive_val = tcg_global_mem_new_i64(cpu_env,
        offsetof(CPUARMState, exclusive_val), "exclusive_val");

    a64_translate_init();
}

/* Generate a label used for skipping this instruction */
static void arm_gen_condlabel(DisasContext *s)
{
    if (!s->condjmp) {
        s->condlabel = gen_new_label();
        s->condjmp = 1;
    }
}

/*
 * Constant expanders for the decoders.
 */

static int negate(DisasContext *s, int x)
{
    return -x;
}

static int plus_2(DisasContext *s, int x)
{
    return x + 2;
}

static int times_2(DisasContext *s, int x)
{
    return x * 2;
}

static int times_4(DisasContext *s, int x)
{
    return x * 4;
}

/* Flags for the disas_set_da_iss info argument:
 * lower bits hold the Rt register number, higher bits are flags.
 */
typedef enum ISSInfo {
    ISSNone = 0,
    ISSRegMask = 0x1f,
    ISSInvalid = (1 << 5),
    ISSIsAcqRel = (1 << 6),
    ISSIsWrite = (1 << 7),
    ISSIs16Bit = (1 << 8),
} ISSInfo;

/* Save the syndrome information for a Data Abort */
static void disas_set_da_iss(DisasContext *s, MemOp memop, ISSInfo issinfo)
{
    uint32_t syn;
    int sas = memop & MO_SIZE;
    bool sse = memop & MO_SIGN;
    bool is_acqrel = issinfo & ISSIsAcqRel;
    bool is_write = issinfo & ISSIsWrite;
    bool is_16bit = issinfo & ISSIs16Bit;
    int srt = issinfo & ISSRegMask;

    if (issinfo & ISSInvalid) {
        /* Some callsites want to conditionally provide ISS info,
         * eg "only if this was not a writeback"
         */
        return;
    }

    if (srt == 15) {
        /* For AArch32, insns where the src/dest is R15 never generate
         * ISS information. Catching that here saves checking at all
         * the call sites.
         */
        return;
    }

    syn = syn_data_abort_with_iss(0, sas, sse, srt, 0, is_acqrel,
                                  0, 0, 0, is_write, 0, is_16bit);
    disas_set_insn_syndrome(s, syn);
}

static inline int get_a32_user_mem_index(DisasContext *s)
{
    /* Return the core mmu_idx to use for A32/T32 "unprivileged load/store"
     * insns:
     *  if PL2, UNPREDICTABLE (we choose to implement as if PL0)
     *  otherwise, access as if at PL0.
     */
    switch (s->mmu_idx) {
    case ARMMMUIdx_E2:        /* this one is UNPREDICTABLE */
    case ARMMMUIdx_E10_0:
    case ARMMMUIdx_E10_1:
    case ARMMMUIdx_E10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_E10_0);
    case ARMMMUIdx_SE3:
    case ARMMMUIdx_SE10_0:
    case ARMMMUIdx_SE10_1:
    case ARMMMUIdx_SE10_1_PAN:
        return arm_to_core_mmu_idx(ARMMMUIdx_SE10_0);
    case ARMMMUIdx_MUser:
    case ARMMMUIdx_MPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
    case ARMMMUIdx_MUserNegPri:
    case ARMMMUIdx_MPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
    case ARMMMUIdx_MSUser:
    case ARMMMUIdx_MSPriv:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
    case ARMMMUIdx_MSUserNegPri:
    case ARMMMUIdx_MSPrivNegPri:
        return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
    default:
        g_assert_not_reached();
    }
}

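/* Load a 32-bit value from the given byte offset inside CPUARMState
 * into a freshly allocated TCG temporary and return it.
 */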
static inline TCGv_i32 load_cpu_offset(int offset)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}

#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))

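/* Store var at the given byte offset inside CPUARMState; var is
 * marked as dead (the temporary is freed here).
 */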
static inline void store_cpu_offset(TCGv_i32 var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    tcg_temp_free_i32(var);
}

#define store_cpu_field(var, name) \
    store_cpu_offset(var, offsetof(CPUARMState, name))

/* The architectural value of PC. */
static uint32_t read_pc(DisasContext *s)
{
    return s->pc_curr + (s->thumb ? 4 : 8);
}

/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
{
    if (reg == 15) {
        tcg_gen_movi_i32(var, read_pc(s));
    } else {
        tcg_gen_mov_i32(var, cpu_R[reg]);
    }
}

/* Create a new temporary and set it to the value of a CPU register. */
static inline TCGv_i32 load_reg(DisasContext *s, int reg)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    load_reg_var(s, tmp, reg);
    return tmp;
}

/*
 * Create a new temp, REG + OFS, except PC is ALIGN(PC, 4).
 * This is used for load/store for which use of PC implies (literal),
 * or ADD that implies ADR.
 */
static TCGv_i32 add_reg_for_lit(DisasContext *s, int reg, int ofs)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    if (reg == 15) {
        tcg_gen_movi_i32(tmp, (read_pc(s) & ~3) + ofs);
    } else {
        tcg_gen_addi_i32(tmp, cpu_R[reg], ofs);
    }
    return tmp;
}

/* Set a CPU register. The source must be a temporary and will be
   marked as dead. */
static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15) {
        /* In Thumb mode, we must ignore bit 0.
         * In ARM mode, for ARMv4 and ARMv5, it is UNPREDICTABLE if bits [1:0]
         * are not 0b00, but for ARMv6 and above, we must ignore bits [1:0].
         * We choose to ignore [1:0] in ARM mode for all architecture versions.
         */
        tcg_gen_andi_i32(var, var, s->thumb ? ~1 : ~3);
        s->base.is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    tcg_temp_free_i32(var);
}

/*
 * Variant of store_reg which applies v8M stack-limit checks before updating
 * SP. If the check fails this will result in an exception being taken.
 * We disable the stack checks for CONFIG_USER_ONLY because we have
 * no idea what the stack limits should be in that case.
 * If stack checking is not being done this just acts like store_reg().
 */
static void store_sp_checked(DisasContext *s, TCGv_i32 var)
{
#ifndef CONFIG_USER_ONLY
    if (s->v8m_stackcheck) {
        gen_helper_v8m_stackcheck(cpu_env, var);
    }
#endif
    store_reg(s, 13, var);
}

/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)

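/* Write var into the CPSR, updating only the fields selected by mask
 * (via the cpsr_write helper). The NZCV-only case is wrapped by the
 * gen_set_nzcv macro below.
 */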
static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
{
    TCGv_i32 tmp_mask = tcg_const_i32(mask);
    gen_helper_cpsr_write(cpu_env, var, tmp_mask);
    tcg_temp_free_i32(tmp_mask);
}

/* Set NZCV flags from the high 4 bits of var. */
#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)

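/* Generate code to raise one of QEMU's internal exceptions (EXCP_DEBUG
 * and friends); excp must be an internal exception number, not an
 * architectural one.
 */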
static void gen_exception_internal(int excp)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);

    assert(excp_is_internal(excp));
    gen_helper_exception_internal(cpu_env, tcg_excp);
    tcg_temp_free_i32(tcg_excp);
}

static void gen_step_complete_exception(DisasContext *s)
{
    /* We just completed step of an insn. Move from Active-not-pending
     * to Active-pending, and then also take the swstep exception.
     * This corresponds to making the (IMPDEF) choice to prioritize
     * swstep exceptions over asynchronous exceptions taken to an exception
     * level where debug is disabled. This choice has the advantage that
     * we do not need to maintain internal state corresponding to the
     * ISV/EX syndrome bits between completion of the step and generation
     * of the exception, and our syndrome information is always correct.
     */
    gen_ss_advance(s);
    gen_swstep_exception(s, 1, s->is_ldex);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_singlestep_exception(DisasContext *s)
{
    /* Generate the right kind of exception for singlestep, which is
     * either the architectural singlestep or EXCP_DEBUG for QEMU's
     * gdb singlestepping.
     */
    if (s->ss_active) {
        gen_step_complete_exception(s);
    } else {
        gen_exception_internal(EXCP_DEBUG);
    }
}

static inline bool is_singlestepping(DisasContext *s)
{
    /* Return true if we are singlestepping either because of
     * architectural singlestep or QEMU gdbstub singlestep. This does
     * not include the command line '-singlestep' mode which is rather
     * misnamed as it only means "one instruction per TB" and doesn't
     * affect the code we generate.
     */
    return s->base.singlestep_enabled || s->ss_active;
}

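/* Signed 16x16->32 multiplies of the low and high halfwords: on
 * return, a holds lo(a) * lo(b) and b holds hi(a) * hi(b).
 */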
static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    tcg_temp_free_i32(tmp1);
}

/* Byteswap each halfword. */
static void gen_rev16(TCGv_i32 dest, TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 mask = tcg_const_i32(0x00ff00ff);
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_and_i32(tmp, tmp, mask);
    tcg_gen_and_i32(var, var, mask);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_or_i32(dest, var, tmp);
    tcg_temp_free_i32(mask);
    tcg_temp_free_i32(tmp);
}

/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_bswap16_i32(var, var);
    tcg_gen_ext16s_i32(dest, var);
}

/* Swap low and high halfwords. */
static void gen_swap_half(TCGv_i32 dest, TCGv_i32 var)
{
    tcg_gen_rotri_i32(dest, var, 16);
}

/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
    tmp = (t0 ^ t1) & 0x8000;
    t0 &= ~0x8000;
    t1 &= ~0x8000;
    t0 = (t0 + t1) ^ tmp;
 */

static void gen_add16(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv_i32 var)
{
    tcg_gen_mov_i32(cpu_NF, var);
    tcg_gen_mov_i32(cpu_ZF, var);
}

/* dest = T0 + T1 + CF. */
static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_add_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
}

/* dest = T0 - T1 + CF - 1. */
static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    tcg_gen_sub_i32(dest, t0, t1);
    tcg_gen_add_i32(dest, dest, cpu_CF);
    tcg_gen_subi_i32(dest, dest, 1);
}

/* dest = T0 + T1. Compute C, N, V and Z flags */
static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, 0);
    tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    if (TCG_TARGET_HAS_add2_i32) {
        tcg_gen_movi_i32(tmp, 0);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
        tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
    } else {
        TCGv_i64 q0 = tcg_temp_new_i64();
        TCGv_i64 q1 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(q0, t0);
        tcg_gen_extu_i32_i64(q1, t1);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extu_i32_i64(q1, cpu_CF);
        tcg_gen_add_i64(q0, q0, q1);
        tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
        tcg_temp_free_i64(q0);
        tcg_temp_free_i64(q1);
    }
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 - T1. Compute C, N, V and Z flags */
static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp;
    tcg_gen_sub_i32(cpu_NF, t0, t1);
    tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
    tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
    tmp = tcg_temp_new_i32();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
    tcg_temp_free_i32(tmp);
    tcg_gen_mov_i32(dest, cpu_NF);
}

/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_not_i32(tmp, t1);
    gen_adc_CC(dest, t0, tmp);
    tcg_temp_free_i32(tmp);
}

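/* Shift by the value held in a register: only the low 8 bits of t1
 * are used, and a shift amount of 32 or more yields 0. Instantiated
 * below as gen_shl() and gen_shr().
 */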
#define GEN_SHIFT(name)                                               \
static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)      \
{                                                                     \
    TCGv_i32 tmp1, tmp2, tmp3;                                        \
    tmp1 = tcg_temp_new_i32();                                        \
    tcg_gen_andi_i32(tmp1, t1, 0xff);                                 \
    tmp2 = tcg_const_i32(0);                                          \
    tmp3 = tcg_const_i32(0x1f);                                       \
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0);    \
    tcg_temp_free_i32(tmp3);                                          \
    tcg_gen_andi_i32(tmp1, tmp1, 0x1f);                               \
    tcg_gen_##name##_i32(dest, tmp2, tmp1);                           \
    tcg_temp_free_i32(tmp2);                                          \
    tcg_temp_free_i32(tmp1);                                          \
}
GEN_SHIFT(shl)
GEN_SHIFT(shr)
#undef GEN_SHIFT

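/* Arithmetic shift right by register: like GEN_SHIFT above, but a
 * shift amount of 32 or more is clamped to 31 so the sign bit is
 * replicated instead of producing 0.
 */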
static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 tmp1, tmp2;
    tmp1 = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp1, t1, 0xff);
    tmp2 = tcg_const_i32(0x1f);
    tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
    tcg_temp_free_i32(tmp2);
    tcg_gen_sar_i32(dest, t0, tmp1);
    tcg_temp_free_i32(tmp1);
}

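/* Copy bit 'shift' of var into the C flag (the shifter carry-out). */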
static void shifter_out_im(TCGv_i32 var, int shift)
{
    tcg_gen_extract_i32(cpu_CF, var, shift, 1);
}

/* Shift by immediate. Includes special handling for shift == 0. */
static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
                                    int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            if (flags) {
                tcg_gen_shri_i32(cpu_CF, var, 31);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rotri_i32(var, var, shift); break;
        } else {
            TCGv_i32 tmp = tcg_temp_new_i32();
            tcg_gen_shli_i32(tmp, cpu_CF, 31);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_or_i32(var, var, tmp);
            tcg_temp_free_i32(tmp);
        }
    }
};

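/* Shift var by a register-specified amount. If flags is set, the
 * *_cc helpers also compute the shifter carry-out; shift is marked
 * as dead.
 */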
static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
                                     TCGv_i32 shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
        case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
        case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
        case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0:
            gen_shl(var, var, shift);
            break;
        case 1:
            gen_shr(var, var, shift);
            break;
        case 2:
            gen_sar(var, var, shift);
            break;
        case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
                tcg_gen_rotr_i32(var, var, shift); break;
        }
    }
    tcg_temp_free_i32(shift);
}

/*
 * Generate a conditional based on ARM condition code cc.
 * This is common between ARM and Aarch64 targets.
 */
void arm_test_cc(DisasCompare *cmp, int cc)
{
    TCGv_i32 value;
    TCGCond cond;
    bool global = true;

    switch (cc) {
    case 0: /* eq: Z */
    case 1: /* ne: !Z */
        cond = TCG_COND_EQ;
        value = cpu_ZF;
        break;

    case 2: /* cs: C */
    case 3: /* cc: !C */
        cond = TCG_COND_NE;
        value = cpu_CF;
        break;

    case 4: /* mi: N */
    case 5: /* pl: !N */
        cond = TCG_COND_LT;
        value = cpu_NF;
        break;

    case 6: /* vs: V */
    case 7: /* vc: !V */
        cond = TCG_COND_LT;
        value = cpu_VF;
        break;

    case 8: /* hi: C && !Z */
    case 9: /* ls: !C || Z -> !(C && !Z) */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* CF is 1 for C, so -CF is an all-bits-set mask for C;
           ZF is non-zero for !Z; so AND the two subexpressions. */
        tcg_gen_neg_i32(value, cpu_CF);
        tcg_gen_and_i32(value, value, cpu_ZF);
        break;

    case 10: /* ge: N == V -> N ^ V == 0 */
    case 11: /* lt: N != V -> N ^ V != 0 */
        /* Since we're only interested in the sign bit, == 0 is >= 0. */
        cond = TCG_COND_GE;
        value = tcg_temp_new_i32();
        global = false;
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        break;

    case 12: /* gt: !Z && N == V */
    case 13: /* le: Z || N != V */
        cond = TCG_COND_NE;
        value = tcg_temp_new_i32();
        global = false;
        /* (N == V) is equal to the sign bit of ~(NF ^ VF). Propagate
         * the sign bit then AND with ZF to yield the result. */
        tcg_gen_xor_i32(value, cpu_VF, cpu_NF);
        tcg_gen_sari_i32(value, value, 31);
        tcg_gen_andc_i32(value, cpu_ZF, value);
        break;

    case 14: /* always */
    case 15: /* always */
        /* Use the ALWAYS condition, which will fold early.
         * It doesn't matter what we use for the value. */
        cond = TCG_COND_ALWAYS;
        value = cpu_ZF;
        goto no_invert;

    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }

    if (cc & 1) {
        cond = tcg_invert_cond(cond);
    }

 no_invert:
    cmp->cond = cond;
    cmp->value = value;
    cmp->value_global = global;
}

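/* Release the comparison value if arm_test_cc() allocated a temporary
 * for it (i.e. it is not one of the global flag variables).
 */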
void arm_free_cc(DisasCompare *cmp)
{
    if (!cmp->value_global) {
        tcg_temp_free_i32(cmp->value);
    }
}

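/* Branch to label if the condition described by cmp holds. */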
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label)
{
    tcg_gen_brcondi_i32(cmp->cond, cmp->value, 0, label);
}

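/* Convenience wrapper: branch to label if condition code cc passes. */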
void arm_gen_test_cc(int cc, TCGLabel *label)
{
    DisasCompare cmp;
    arm_test_cc(&cmp, cc);
    arm_jump_cc(&cmp, label);
    arm_free_cc(&cmp);
}

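/* If we are inside an IT block, write the current condexec state back
 * to the condexec_bits field of CPUARMState.
 */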
static inline void gen_set_condexec(DisasContext *s)
{
    if (s->condexec_mask) {
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}

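/* Set the PC register to an immediate value. */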
static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
|
|
|
|
{
|
|
|
|
tcg_gen_movi_i32(cpu_R[15], val);
|
|
|
|
}
|
|
|
|
|
2008-03-31 11:46:50 +08:00
|
|
|
/* Set PC and Thumb state from var. var is marked as dead. */
|
2013-05-23 19:59:55 +08:00
|
|
|
static inline void gen_bx(DisasContext *s, TCGv_i32 var)
|
2008-03-31 11:46:50 +08:00
|
|
|
{
|
2017-07-14 17:01:59 +08:00
|
|
|
s->base.is_jmp = DISAS_JUMP;
|
2009-10-15 18:00:41 +08:00
|
|
|
tcg_gen_andi_i32(cpu_R[15], var, ~1);
|
|
|
|
tcg_gen_andi_i32(var, var, 1);
|
|
|
|
store_cpu_field(var, thumb);
|
2008-03-31 11:46:50 +08:00
|
|
|
}
|
|
|
|
|
2019-08-22 21:15:34 +08:00
|
|
|
/*
|
|
|
|
* Set PC and Thumb state from var. var is marked as dead.
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1491844419-12485-9-git-send-email-peter.maydell@linaro.org
2017-04-21 00:32:31 +08:00
|
|
|
* For M-profile CPUs, include logic to detect exception-return
|
|
|
|
* branches and handle them. This is needed for Thumb POP/LDM to PC, LDR to PC,
|
|
|
|
* and BX reg, and no others, and happens only for code in Handler mode.
|
2019-08-22 21:15:34 +08:00
|
|
|
* The Security Extension also requires us to check for the FNC_RETURN
|
|
|
|
* which signals a function return from non-secure state; this can happen
|
|
|
|
* in both Handler and Thread mode.
|
|
|
|
* To avoid having to do multiple comparisons in inline generated code,
|
|
|
|
* we make the check we do here loose, so it will match for EXC_RETURN
|
|
|
|
* in Thread mode. For system emulation do_v7m_exception_exit() checks
|
|
|
|
* for these spurious cases and returns without doing anything (giving
|
|
|
|
* the same behaviour as for a branch to a non-magic address).
|
|
|
|
*
|
|
|
|
* In linux-user mode it is unclear what the right behaviour for an
|
|
|
|
* attempted FNC_RETURN should be, because in real hardware this will go
|
|
|
|
* directly to Secure code (ie not the Linux kernel) which will then treat
|
|
|
|
* the error in any way it chooses. For QEMU we opt to make the FNC_RETURN
|
|
|
|
* attempt behave the way it would on a CPU without the security extension,
|
|
|
|
* which is to say "like a normal branch". That means we can simply treat
|
|
|
|
* all branches as normal with no magic address behaviour.
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1491844419-12485-9-git-send-email-peter.maydell@linaro.org
2017-04-21 00:32:31 +08:00
|
|
|
*/
|
|
|
|
static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
|
|
|
|
{
|
|
|
|
/* Generate the same code here as for a simple bx, but flag via
|
2017-07-14 17:01:59 +08:00
|
|
|
* s->base.is_jmp that we need to do the rest of the work later.
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1491844419-12485-9-git-send-email-peter.maydell@linaro.org
2017-04-21 00:32:31 +08:00
|
|
|
*/
|
|
|
|
gen_bx(s, var);
|
2019-08-22 21:15:34 +08:00
|
|
|
#ifndef CONFIG_USER_ONLY
|
2017-10-09 21:48:34 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
|
|
|
|
(s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
|
2017-07-14 17:01:59 +08:00
|
|
|
s->base.is_jmp = DISAS_BX_EXCRET;
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1491844419-12485-9-git-send-email-peter.maydell@linaro.org
2017-04-21 00:32:31 +08:00
|
|
|
}
|
2019-08-22 21:15:34 +08:00
|
|
|
#endif
|
arm: Implement M profile exception return properly
On M profile, return from exceptions happen when code in Handler mode
executes one of the following function call return instructions:
* POP or LDM which loads the PC
* LDR to PC
* BX register
and the new PC value is 0xFFxxxxxx.
QEMU tries to implement this by not treating the instruction
specially but then catching the attempt to execute from the magic
address value. This is not ideal, because:
* there are guest visible differences from the architecturally
specified behaviour (for instance jumping to 0xFFxxxxxx via a
different instruction should not cause an exception return but it
will in the QEMU implementation)
* we have to account for it in various places (like refusing to take
an interrupt if the PC is at a magic value, and making sure that
the MPU doesn't deny execution at the magic value addresses)
Drop these hacks, and instead implement exception return the way the
architecture specifies -- by having the relevant instructions check
for the magic value and raise the 'do an exception return' QEMU
internal exception immediately.
The effect on the generated code is minor:
bx lr, old code (and new code for Thread mode):
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
exit_tb $0x0
set_label $L0
exit_tb $0x7f2aabd61993
x86_64 generated code:
0x7f2aabe87019: mov %ebx,%ebp
0x7f2aabe8701b: and $0xfffffffffffffffe,%ebp
0x7f2aabe8701e: mov %ebp,0x3c(%r14)
0x7f2aabe87022: and $0x1,%ebx
0x7f2aabe87025: mov %ebx,0x218(%r14)
0x7f2aabe8702c: xor %eax,%eax
0x7f2aabe8702e: jmpq 0x7f2aabe7c016
bx lr, new code when in Handler mode:
TCG:
mov_i32 tmp5,r14
movi_i32 tmp6,$0xfffffffffffffffe
and_i32 pc,tmp5,tmp6
movi_i32 tmp6,$0x1
and_i32 tmp5,tmp5,tmp6
st_i32 tmp5,env,$0x218
movi_i32 tmp5,$0xffffffffff000000
brcond_i32 pc,tmp5,geu,$L1
exit_tb $0x0
set_label $L1
movi_i32 tmp5,$0x8
call exception_internal,$0x0,$0,env,tmp5
x86_64 generated code:
0x7fe8fa1264e3: mov %ebp,%ebx
0x7fe8fa1264e5: and $0xfffffffffffffffe,%ebx
0x7fe8fa1264e8: mov %ebx,0x3c(%r14)
0x7fe8fa1264ec: and $0x1,%ebp
0x7fe8fa1264ef: mov %ebp,0x218(%r14)
0x7fe8fa1264f6: cmp $0xff000000,%ebx
0x7fe8fa1264fc: jae 0x7fe8fa126509
0x7fe8fa126502: xor %eax,%eax
0x7fe8fa126504: jmpq 0x7fe8fa122016
0x7fe8fa126509: mov %r14,%rdi
0x7fe8fa12650c: mov $0x8,%esi
0x7fe8fa126511: mov $0x56095dbeccf5,%r10
0x7fe8fa12651b: callq *%r10
which is a difference of one cmp/branch-not-taken. This will
be lost in the noise of having to exit generated code and
look up the next TB anyway.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 1491844419-12485-9-git-send-email-peter.maydell@linaro.org
2017-04-21 00:32:31 +08:00
|
|
|
}

static inline void gen_bx_excret_final_code(DisasContext *s)
{
    /* Generate the code to finish possible exception return and end the TB */
    TCGLabel *excret_label = gen_new_label();
    uint32_t min_magic;

    if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
        /* Covers FNC_RETURN and EXC_RETURN magic */
        min_magic = FNC_RETURN_MIN_MAGIC;
    } else {
        /* EXC_RETURN magic only */
        min_magic = EXC_RETURN_MIN_MAGIC;
    }

    /* Is the new PC value in the magic range indicating exception return? */
    tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
    /* No: end the TB as we would for a DISAS_JMP */
    if (is_singlestepping(s)) {
        gen_singlestep_exception(s);
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    gen_set_label(excret_label);
    /* Yes: this is an exception return.
     * At this point in runtime env->regs[15] and env->thumb will hold
     * the exception-return magic number, which do_v7m_exception_exit()
     * will read. Nothing else will be able to see those values because
     * the cpu-exec main loop guarantees that we will always go straight
     * from raising the exception to the exception-handling code.
     *
     * gen_ss_advance(s) does nothing on M profile currently but
     * calling it is conceptually the right thing as we have executed
     * this instruction (compare SWI, HVC, SMC handling).
     */
    gen_ss_advance(s);
    gen_exception_internal(EXCP_EXCEPTION_EXIT);
}
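
/*
 * Illustrative note (not from the original source): assuming the usual QEMU
 * definitions EXC_RETURN_MIN_MAGIC == 0xff000000 and
 * FNC_RETURN_MIN_MAGIC == 0xfefffffe, a Handler-mode "bx lr" with
 * lr == 0xfffffff9 (a hypothetical EXC_RETURN value) behaves like:
 *
 *     uint32_t new_pc = 0xfffffff9 & ~1;
 *     if (new_pc >= min_magic) {
 *         // exception return: raise EXCP_EXCEPTION_EXIT so that
 *         // do_v7m_exception_exit() can unstack the exception frame
 *     } else {
 *         // ordinary interworking branch: just end the TB
 *     }
 */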

static inline void gen_bxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* The bxns helper may raise an EXCEPTION_EXIT exception, so in theory
     * we need to sync state before calling it, but:
     *  - we don't need to do gen_set_pc_im() because the bxns helper will
     *    always set the PC itself
     *  - we don't need to do gen_set_condexec() because BXNS is UNPREDICTABLE
     *    unless it's outside an IT block or the last insn in an IT block,
     *    so we know that condexec == 0 (already set at the top of the TB)
     *    is correct in the non-UNPREDICTABLE cases, and we can choose
     *    "zeroes the IT bits" as our UNPREDICTABLE behaviour otherwise.
     */
    gen_helper_v7m_bxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_blxns(DisasContext *s, int rm)
{
    TCGv_i32 var = load_reg(s, rm);

    /* We don't need to sync condexec state, for the same reason as bxns.
     * We do however need to set the PC, because the blxns helper reads it.
     * The blxns helper may throw an exception.
     */
    gen_set_pc_im(s, s->base.pc_next);
    gen_helper_v7m_blxns(cpu_env, var);
    tcg_temp_free_i32(var);
    s->base.is_jmp = DISAS_EXIT;
}

/* Variant of store_reg which uses branch&exchange logic when storing
   to r15 in ARM architecture v7 and above. The source must be a temporary
   and will be marked as dead. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_7) {
        gen_bx(s, var);
    } else {
        store_reg(s, reg, var);
    }
}

/* Variant of store_reg which uses branch&exchange logic when storing
 * to r15 in ARM architecture v5T and above. This is used for storing
 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
{
    if (reg == 15 && ENABLE_ARCH_5) {
        gen_bx_excret(s, var);
    } else {
        store_reg(s, reg, var);
    }
}
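
/*
 * Illustrative note (not from the original source): on v5T and later a load
 * into r15 interworks, so bit 0 of the loaded value selects the instruction
 * set, roughly:
 *
 *     uint32_t loaded = 0x00008001;   // hypothetical value popped into PC
 *     bool thumb = loaded & 1;        // Thumb if bit 0 is set
 *     uint32_t new_pc = loaded & ~1;  // bit 0 is not part of the address
 *
 * gen_bx_excret() additionally arranges for the M-profile exception-return
 * magic values to be checked at the end of the TB, via
 * gen_bx_excret_final_code() above.
 */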

#ifdef CONFIG_USER_ONLY
#define IS_USER_ONLY 1
#else
#define IS_USER_ONLY 0
#endif

/* Abstractions of "generate code to do a guest load/store for
 * AArch32", where a vaddr is always 32 bits (and is zero
 * extended if we're a 64 bit core) and data is also
 * 32 bits unless specifically doing a 64 bit access.
 * These functions work like tcg_gen_qemu_{ld,st}* except
 * that the address argument is TCGv_i32 rather than TCGv.
 */

static inline TCGv gen_aa32_addr(DisasContext *s, TCGv_i32 a32, MemOp op)
{
    TCGv addr = tcg_temp_new();
    tcg_gen_extu_i32_tl(addr, a32);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b && (op & MO_SIZE) < MO_32) {
        tcg_gen_xori_tl(addr, addr, 4 - (1 << (op & MO_SIZE)));
    }
    return addr;
}
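
/*
 * Illustrative note (not from the original source): the XOR above is the
 * usual BE32 address-munging trick for sub-word accesses. With SCTLR.B set:
 *
 *     byte access (MO_8):      addr ^= 3;   // 0x1000 -> 0x1003
 *     halfword access (MO_16): addr ^= 2;   // 0x1000 -> 0x1002
 *     word or larger:          unchanged
 *
 * so a little-endian host access of the adjusted address picks up the bytes
 * a BE32 guest expects.
 */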

static void gen_aa32_ld_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

static void gen_aa32_st_i32(DisasContext *s, TCGv_i32 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr;

    if (arm_dc_feature(s, ARM_FEATURE_M) &&
        !arm_dc_feature(s, ARM_FEATURE_M_MAIN)) {
        opc |= MO_ALIGN;
    }

    addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_st_i32(val, addr, index, opc);
    tcg_temp_free(addr);
}

#define DO_GEN_LD(SUFF, OPC) \
static inline void gen_aa32_ld##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_ld_i32(s, val, a32, index, OPC | s->be_data); \
}

#define DO_GEN_ST(SUFF, OPC) \
static inline void gen_aa32_st##SUFF(DisasContext *s, TCGv_i32 val, \
                                     TCGv_i32 a32, int index) \
{ \
    gen_aa32_st_i32(s, val, a32, index, OPC | s->be_data); \
}

static inline void gen_aa32_frob64(DisasContext *s, TCGv_i64 val)
{
    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        tcg_gen_rotri_i64(val, val, 32);
    }
}

static void gen_aa32_ld_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);
    tcg_gen_qemu_ld_i64(val, addr, index, opc);
    gen_aa32_frob64(s, val);
    tcg_temp_free(addr);
}

static inline void gen_aa32_ld64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_ld_i64(s, val, a32, index, MO_Q | s->be_data);
}
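
/*
 * Illustrative note (not from the original source): when SCTLR.B is set,
 * gen_aa32_frob64() swaps the two 32-bit halves of a loaded 64-bit value
 * with a rotate by 32, e.g.
 *
 *     loaded value:   0x1111111122222222
 *     after rotri 32: 0x2222222211111111
 *
 * which matches the per-word layout a BE32 guest expects.
 */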

static void gen_aa32_st_i64(DisasContext *s, TCGv_i64 val, TCGv_i32 a32,
                            int index, MemOp opc)
{
    TCGv addr = gen_aa32_addr(s, a32, opc);

    /* Not needed for user-mode BE32, where we use MO_BE instead. */
    if (!IS_USER_ONLY && s->sctlr_b) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_rotri_i64(tmp, val, 32);
        tcg_gen_qemu_st_i64(tmp, addr, index, opc);
        tcg_temp_free_i64(tmp);
    } else {
        tcg_gen_qemu_st_i64(val, addr, index, opc);
    }
    tcg_temp_free(addr);
}

static inline void gen_aa32_st64(DisasContext *s, TCGv_i64 val,
                                 TCGv_i32 a32, int index)
{
    gen_aa32_st_i64(s, val, a32, index, MO_Q | s->be_data);
}

DO_GEN_LD(8u, MO_UB)
DO_GEN_LD(16u, MO_UW)
DO_GEN_LD(32u, MO_UL)
DO_GEN_ST(8, MO_UB)
DO_GEN_ST(16, MO_UW)
DO_GEN_ST(32, MO_UL)
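
/*
 * Illustrative note (not from the original source): DO_GEN_LD(8u, MO_UB)
 * above expands to roughly
 *
 *     static inline void gen_aa32_ld8u(DisasContext *s, TCGv_i32 val,
 *                                      TCGv_i32 a32, int index)
 *     {
 *         gen_aa32_ld_i32(s, val, a32, index, MO_UB | s->be_data);
 *     }
 *
 * which is how callers such as the iwMMXt decoder below issue byte loads
 * with the correct endianness for the current CPU state.
 */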

static inline void gen_hvc(DisasContext *s, int imm16)
{
    /* The pre HVC helper handles cases when HVC gets trapped
     * as an undefined insn by runtime configuration (ie before
     * the insn really executes).
     */
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_pre_hvc(cpu_env);
    /* Otherwise we will treat this as a real exception which
     * happens after execution of the insn. (The distinction matters
     * for the PC value reported to the exception handler and also
     * for single stepping.)
     */
    s->svc_imm = imm16;
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_HVC;
}

static inline void gen_smc(DisasContext *s)
{
    /* As with HVC, we may take an exception either before or after
     * the insn executes.
     */
    TCGv_i32 tmp;

    gen_set_pc_im(s, s->pc_curr);
    tmp = tcg_const_i32(syn_aa32_smc());
    gen_helper_pre_smc(cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    gen_set_pc_im(s, s->base.pc_next);
    s->base.is_jmp = DISAS_SMC;
}

static void gen_exception_internal_insn(DisasContext *s, uint32_t pc, int excp)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception_internal(excp);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_insn(DisasContext *s, uint32_t pc, int excp,
                               int syn, uint32_t target_el)
{
    gen_set_condexec(s);
    gen_set_pc_im(s, pc);
    gen_exception(excp, syn, target_el);
    s->base.is_jmp = DISAS_NORETURN;
}

static void gen_exception_bkpt_insn(DisasContext *s, uint32_t syn)
{
    TCGv_i32 tcg_syn;

    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_syn = tcg_const_i32(syn);
    gen_helper_exception_bkpt_insn(cpu_env, tcg_syn);
    tcg_temp_free_i32(tcg_syn);
    s->base.is_jmp = DISAS_NORETURN;
}

static void unallocated_encoding(DisasContext *s)
{
    /* Unallocated and reserved encodings are uncategorized */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(),
                       default_exception_el(s));
}

/* Force a TB lookup after an instruction that changes the CPU state. */
static inline void gen_lookup_tb(DisasContext *s)
{
    tcg_gen_movi_i32(cpu_R[15], s->base.pc_next);
    s->base.is_jmp = DISAS_EXIT;
}

static inline void gen_hlt(DisasContext *s, int imm)
{
    /* HLT. This has two purposes.
     * Architecturally, it is an external halting debug instruction.
     * Since QEMU doesn't implement external debug, we treat this as
     * it is required for halting debug disabled: it will UNDEF.
     * Secondly, "HLT 0x3C" is a T32 semihosting trap instruction,
     * and "HLT 0xF000" is an A32 semihosting syscall. These traps
     * must trigger semihosting even for ARMv7 and earlier, where
     * HLT was an undefined encoding.
     * In system mode, we don't allow userspace access to
     * semihosting, to provide some semblance of security
     * (and for consistency with our 32-bit semihosting).
     */
    if (semihosting_enabled() &&
#ifndef CONFIG_USER_ONLY
        s->current_el != 0 &&
#endif
        (imm == (s->thumb ? 0x3c : 0xf000))) {
        gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
        return;
    }

    unallocated_encoding(s);
}
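
/*
 * Illustrative note (not from the original source): with semihosting
 * enabled, privileged guest code (EL != 0 in system mode) reaches the
 * semihosting handler via
 *
 *     A32:  hlt #0xf000
 *     T32:  hlt #0x3c
 *
 * while any other immediate falls through to unallocated_encoding() and
 * UNDEFs.
 */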

/*
 * Return the offset of a "full" NEON Dreg.
 */
static long neon_full_reg_offset(unsigned reg)
{
    return offsetof(CPUARMState, vfp.zregs[reg >> 1].d[reg & 1]);
}

/*
 * Return the offset of a 2**SIZE piece of a NEON register, at index ELE,
 * where 0 is the least significant end of the register.
 */
static long neon_element_offset(int reg, int element, MemOp memop)
{
    int element_size = 1 << (memop & MO_SIZE);
    int ofs = element * element_size;
#ifdef HOST_WORDS_BIGENDIAN
    /*
     * Calculate the offset assuming fully little-endian,
     * then XOR to account for the order of the 8-byte units.
     */
    if (element_size < 8) {
        ofs ^= 8 - element_size;
    }
#endif
    return neon_full_reg_offset(reg) + ofs;
}
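
/*
 * Illustrative note (not from the original source): for a halfword element
 * (MO_16, element_size == 2) at index 3, the little-endian byte offset
 * within the Dreg is 3 * 2 == 6; on a big-endian host this is XORed with
 * 8 - 2 == 6, giving 0, because the 8-byte unit is stored the other way
 * round in host memory.
 */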

/* Return the offset of a VFP Dreg (dp = true) or VFP Sreg (dp = false). */
static long vfp_reg_offset(bool dp, unsigned reg)
{
    if (dp) {
        return neon_element_offset(reg, 0, MO_64);
    } else {
        return neon_element_offset(reg >> 1, reg & 1, MO_32);
    }
}
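
/*
 * Illustrative note (not from the original source): single-precision
 * registers alias the halves of the double-precision registers, so for
 * example s5 is element 1 (the upper half) of d2:
 *
 *     vfp_reg_offset(false, 5) == neon_element_offset(2, 1, MO_32)
 */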

static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static void read_neon_element32(TCGv_i32 dest, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_SB:
        tcg_gen_ld8s_i32(dest, cpu_env, off);
        break;
    case MO_UB:
        tcg_gen_ld8u_i32(dest, cpu_env, off);
        break;
    case MO_SW:
        tcg_gen_ld16s_i32(dest, cpu_env, off);
        break;
    case MO_UW:
        tcg_gen_ld16u_i32(dest, cpu_env, off);
        break;
    case MO_UL:
    case MO_SL:
        tcg_gen_ld_i32(dest, cpu_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_neon_element64(TCGv_i64 dest, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_SL:
        tcg_gen_ld32s_i64(dest, cpu_env, off);
        break;
    case MO_UL:
        tcg_gen_ld32u_i64(dest, cpu_env, off);
        break;
    case MO_Q:
        tcg_gen_ld_i64(dest, cpu_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_neon_element32(TCGv_i32 src, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_8:
        tcg_gen_st8_i32(src, cpu_env, off);
        break;
    case MO_16:
        tcg_gen_st16_i32(src, cpu_env, off);
        break;
    case MO_32:
        tcg_gen_st_i32(src, cpu_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_neon_element64(TCGv_i64 src, int reg, int ele, MemOp memop)
{
    long off = neon_element_offset(reg, ele, memop);

    switch (memop) {
    case MO_32:
        tcg_gen_st32_i64(src, cpu_env, off);
        break;
    case MO_64:
        tcg_gen_st_i64(src, cpu_env, off);
        break;
    default:
        g_assert_not_reached();
    }
}
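
/*
 * Illustrative note (not from the original source): a decoder that wants,
 * say, the sign-extended 16-bit lane 2 of Dreg 3 would do something like
 *
 *     TCGv_i32 tmp = tcg_temp_new_i32();
 *     read_neon_element32(tmp, 3, 2, MO_SW);
 *     ...
 *     tcg_temp_free_i32(tmp);
 *
 * with write_neon_element32() used for the store-back direction.
 */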

static TCGv_ptr vfp_reg_ptr(bool dp, int reg)
{
    TCGv_ptr ret = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ret, cpu_env, vfp_reg_offset(dp, reg));
    return ret;
}

#define ARM_CP_RW_BIT (1 << 20)

/* Include the VFP and Neon decoders */
#include "decode-m-nocp.c.inc"
#include "translate-vfp.c.inc"
#include "translate-neon.c.inc"

static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}

static inline TCGv_i32 iwmmxt_load_creg(int reg)
{
    TCGv_i32 var = tcg_temp_new_i32();
    tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    return var;
}

static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
    tcg_temp_free_i32(var);
}

static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}

#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)

static void gen_op_iwmmxt_set_mup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 2);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_set_cup(void)
{
    TCGv_i32 tmp;
    tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
    tcg_gen_ori_i32(tmp, tmp, 1);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
}

static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
                                     TCGv_i32 dest)
{
    int rd;
    uint32_t offset;
    TCGv_i32 tmp;

    rd = (insn >> 16) & 0xf;
    tmp = load_reg(s, rd);

    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 21))
            store_reg(s, rd, tmp);
        else
            tcg_temp_free_i32(tmp);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        tcg_gen_mov_i32(dest, tmp);
        if (insn & (1 << 23))
            tcg_gen_addi_i32(tmp, tmp, offset);
        else
            tcg_gen_addi_i32(tmp, tmp, -offset);
        store_reg(s, rd, tmp);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
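
/*
 * Illustrative note (not from the original source): the insn bits tested
 * above select the addressing mode in the usual ARM coprocessor style,
 * roughly
 *
 *     bit 24 (P): pre-indexed when set, otherwise post-indexed
 *     bit 23 (U): add the offset when set, subtract otherwise
 *     bit 21 (W): write the updated address back to the base register
 *
 * so e.g. P=1, U=1, W=0 computes dest = Rn + offset and leaves Rn alone,
 * while P=0, W=1 uses Rn as the address and then updates Rn afterwards.
 */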

static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
{
    int rd = (insn >> 0) & 0xf;
    TCGv_i32 tmp;

    if (insn & (1 << 8)) {
        if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
            return 1;
        } else {
            tmp = iwmmxt_load_creg(rd);
        }
    } else {
        tmp = tcg_temp_new_i32();
        iwmmxt_load_reg(cpu_V0, rd);
        tcg_gen_extrl_i64_i32(tmp, cpu_V0);
    }
    tcg_gen_andi_i32(tmp, tmp, mask);
    tcg_gen_mov_i32(dest, tmp);
    tcg_temp_free_i32(tmp);
    return 0;
}
|
|
|
|
|
2011-04-28 23:20:38 +08:00
|
|
|
/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
|
2007-04-30 10:02:17 +08:00
|
|
|
(ie. an undefined instruction). */
|
2014-10-29 03:24:03 +08:00
|
|
|
static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
|
2007-04-30 10:02:17 +08:00
|
|
|
{
|
|
|
|
int rd, wrd;
|
|
|
|
int rdhi, rdlo, rd0, rd1, i;
|
2013-05-23 19:59:55 +08:00
|
|
|
TCGv_i32 addr;
|
|
|
|
TCGv_i32 tmp, tmp2, tmp3;
|
2007-04-30 10:02:17 +08:00
|
|
|
|
|
|
|
if ((insn & 0x0e000e00) == 0x0c000000) {
|
|
|
|
if ((insn & 0x0fe00ff0) == 0x0c400000) {
|
|
|
|
wrd = insn & 0xf;
|
|
|
|
rdlo = (insn >> 12) & 0xf;
|
|
|
|
rdhi = (insn >> 16) & 0xf;
|
2018-08-24 20:17:47 +08:00
|
|
|
if (insn & ARM_CP_RW_BIT) { /* TMRRC */
|
2009-10-15 20:39:02 +08:00
|
|
|
iwmmxt_load_reg(cpu_V0, wrd);
|
2015-07-25 02:49:53 +08:00
|
|
|
tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
|
2019-08-09 04:26:16 +08:00
|
|
|
tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
|
2018-08-24 20:17:47 +08:00
|
|
|
} else { /* TMCRR */
|
2009-10-15 20:39:02 +08:00
|
|
|
tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
|
|
|
|
iwmmxt_store_reg(cpu_V0, wrd);
|
2007-04-30 10:02:17 +08:00
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
wrd = (insn >> 12) & 0xf;
|
2011-03-07 05:39:54 +08:00
|
|
|
addr = tcg_temp_new_i32();
|
2009-10-15 20:39:02 +08:00
|
|
|
if (gen_iwmmxt_address(s, insn, addr)) {
|
2011-03-07 05:39:54 +08:00
|
|
|
tcg_temp_free_i32(addr);
|
2007-04-30 10:02:17 +08:00
|
|
|
return 1;
|
2009-10-15 20:39:02 +08:00
|
|
|
}
|
2007-04-30 10:02:17 +08:00
|
|
|
if (insn & ARM_CP_RW_BIT) {
|
2018-08-24 20:17:47 +08:00
|
|
|
if ((insn >> 28) == 0xf) { /* WLDRW wCx */
|
2011-03-07 05:39:54 +08:00
|
|
|
tmp = tcg_temp_new_i32();
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
|
2009-10-15 20:39:02 +08:00
|
|
|
iwmmxt_store_creg(wrd, tmp);
|
2007-04-30 10:02:17 +08:00
|
|
|
} else {
|
2008-03-31 11:49:05 +08:00
|
|
|
i = 1;
|
|
|
|
if (insn & (1 << 8)) {
|
2018-08-24 20:17:47 +08:00
|
|
|
if (insn & (1 << 22)) { /* WLDRD */
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_ld64(s, cpu_M0, addr, get_mem_index(s));
|
2008-03-31 11:49:05 +08:00
|
|
|
i = 0;
|
2018-08-24 20:17:47 +08:00
|
|
|
} else { /* WLDRW wRd */
|
2013-05-23 19:59:57 +08:00
|
|
|
tmp = tcg_temp_new_i32();
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
|
2008-03-31 11:49:05 +08:00
|
|
|
}
|
|
|
|
} else {
|
2013-05-23 19:59:57 +08:00
|
|
|
tmp = tcg_temp_new_i32();
|
2018-08-24 20:17:47 +08:00
|
|
|
if (insn & (1 << 22)) { /* WLDRH */
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
|
2018-08-24 20:17:47 +08:00
|
|
|
} else { /* WLDRB */
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_ld8u(s, tmp, addr, get_mem_index(s));
|
2008-03-31 11:49:05 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if (i) {
|
|
|
|
tcg_gen_extu_i32_i64(cpu_M0, tmp);
|
2011-03-07 05:39:54 +08:00
|
|
|
tcg_temp_free_i32(tmp);
|
2008-03-31 11:49:05 +08:00
|
|
|
}
|
2007-04-30 10:02:17 +08:00
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
}
|
|
|
|
} else {
|
2018-08-24 20:17:47 +08:00
|
|
|
if ((insn >> 28) == 0xf) { /* WSTRW wCx */
|
2009-10-15 20:39:02 +08:00
|
|
|
tmp = iwmmxt_load_creg(wrd);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
|
2007-04-30 10:02:17 +08:00
|
|
|
} else {
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(wrd);
|
2011-03-07 05:39:54 +08:00
|
|
|
tmp = tcg_temp_new_i32();
|
2008-03-31 11:49:05 +08:00
|
|
|
if (insn & (1 << 8)) {
|
2018-08-24 20:17:47 +08:00
|
|
|
if (insn & (1 << 22)) { /* WSTRD */
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st64(s, cpu_M0, addr, get_mem_index(s));
|
2018-08-24 20:17:47 +08:00
|
|
|
} else { /* WSTRW wRd */
|
2015-07-25 02:49:53 +08:00
|
|
|
tcg_gen_extrl_i64_i32(tmp, cpu_M0);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
|
2008-03-31 11:49:05 +08:00
|
|
|
}
|
|
|
|
} else {
|
2018-08-24 20:17:47 +08:00
|
|
|
if (insn & (1 << 22)) { /* WSTRH */
|
2015-07-25 02:49:53 +08:00
|
|
|
tcg_gen_extrl_i64_i32(tmp, cpu_M0);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st16(s, tmp, addr, get_mem_index(s));
|
2018-08-24 20:17:47 +08:00
|
|
|
} else { /* WSTRB */
|
2015-07-25 02:49:53 +08:00
|
|
|
tcg_gen_extrl_i64_i32(tmp, cpu_M0);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st8(s, tmp, addr, get_mem_index(s));
|
2008-03-31 11:49:05 +08:00
|
|
|
}
|
|
|
|
}
|
2007-04-30 10:02:17 +08:00
|
|
|
}
|
2013-05-23 19:59:57 +08:00
|
|
|
tcg_temp_free_i32(tmp);
|
2007-04-30 10:02:17 +08:00
|
|
|
}
|
2011-03-07 05:39:54 +08:00
|
|
|
tcg_temp_free_i32(addr);
|
2007-04-30 10:02:17 +08:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((insn & 0x0f000000) != 0x0e000000)
|
|
|
|
return 1;
|
|
|
|
|
|
|
|
switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x000: /* WOR */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 0) & 0xf;
|
|
|
|
rd1 = (insn >> 16) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
gen_op_iwmmxt_orq_M0_wRn(rd1);
|
|
|
|
gen_op_iwmmxt_setpsr_nz();
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x011: /* TMCR */
|
2007-04-30 10:02:17 +08:00
|
|
|
if (insn & 0xf)
|
|
|
|
return 1;
|
|
|
|
rd = (insn >> 12) & 0xf;
|
|
|
|
wrd = (insn >> 16) & 0xf;
|
|
|
|
switch (wrd) {
|
|
|
|
case ARM_IWMMXT_wCID:
|
|
|
|
case ARM_IWMMXT_wCASF:
|
|
|
|
break;
|
|
|
|
case ARM_IWMMXT_wCon:
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
/* Fall through. */
|
|
|
|
case ARM_IWMMXT_wCSSF:
|
2009-10-15 20:39:02 +08:00
|
|
|
tmp = iwmmxt_load_creg(wrd);
|
|
|
|
tmp2 = load_reg(s, rd);
|
2009-10-15 22:45:14 +08:00
|
|
|
tcg_gen_andc_i32(tmp, tmp, tmp2);
|
2011-03-07 05:39:54 +08:00
|
|
|
tcg_temp_free_i32(tmp2);
|
2009-10-15 20:39:02 +08:00
|
|
|
iwmmxt_store_creg(wrd, tmp);
|
2007-04-30 10:02:17 +08:00
|
|
|
break;
|
|
|
|
case ARM_IWMMXT_wCGR0:
|
|
|
|
case ARM_IWMMXT_wCGR1:
|
|
|
|
case ARM_IWMMXT_wCGR2:
|
|
|
|
case ARM_IWMMXT_wCGR3:
|
|
|
|
gen_op_iwmmxt_set_cup();
|
2009-10-15 20:39:02 +08:00
|
|
|
tmp = load_reg(s, rd);
|
|
|
|
iwmmxt_store_creg(wrd, tmp);
|
2007-04-30 10:02:17 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x100: /* WXOR */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 0) & 0xf;
|
|
|
|
rd1 = (insn >> 16) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
gen_op_iwmmxt_xorq_M0_wRn(rd1);
|
|
|
|
gen_op_iwmmxt_setpsr_nz();
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x111: /* TMRC */
|
2007-04-30 10:02:17 +08:00
|
|
|
if (insn & 0xf)
|
|
|
|
return 1;
|
|
|
|
rd = (insn >> 12) & 0xf;
|
|
|
|
wrd = (insn >> 16) & 0xf;
|
2009-10-15 20:39:02 +08:00
|
|
|
tmp = iwmmxt_load_creg(wrd);
|
|
|
|
store_reg(s, rd, tmp);
|
2007-04-30 10:02:17 +08:00
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x300: /* WANDN */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 0) & 0xf;
|
|
|
|
rd1 = (insn >> 16) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
2008-03-31 11:49:05 +08:00
|
|
|
tcg_gen_neg_i64(cpu_M0, cpu_M0);
|
2007-04-30 10:02:17 +08:00
|
|
|
gen_op_iwmmxt_andq_M0_wRn(rd1);
|
|
|
|
gen_op_iwmmxt_setpsr_nz();
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x200: /* WAND */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 0) & 0xf;
|
|
|
|
rd1 = (insn >> 16) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
gen_op_iwmmxt_andq_M0_wRn(rd1);
|
|
|
|
gen_op_iwmmxt_setpsr_nz();
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x810: case 0xa10: /* WMADD */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 0) & 0xf;
|
|
|
|
rd1 = (insn >> 16) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
if (insn & (1 << 21))
|
|
|
|
gen_op_iwmmxt_maddsq_M0_wRn(rd1);
|
|
|
|
else
|
|
|
|
gen_op_iwmmxt_madduq_M0_wRn(rd1);
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 16) & 0xf;
|
|
|
|
rd1 = (insn >> 0) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
switch ((insn >> 22) & 3) {
|
|
|
|
case 0:
|
|
|
|
gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
gen_op_iwmmxt_unpackll_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
|
2007-04-30 10:02:17 +08:00
|
|
|
wrd = (insn >> 12) & 0xf;
|
|
|
|
rd0 = (insn >> 16) & 0xf;
|
|
|
|
rd1 = (insn >> 0) & 0xf;
|
|
|
|
gen_op_iwmmxt_movq_M0_wRn(rd0);
|
|
|
|
switch ((insn >> 22) & 3) {
|
|
|
|
case 0:
|
|
|
|
gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
gen_op_iwmmxt_movq_wRn_M0(wrd);
|
|
|
|
gen_op_iwmmxt_set_mup();
|
|
|
|
gen_op_iwmmxt_set_cup();
|
|
|
|
break;
|
2018-08-24 20:17:47 +08:00
|
|
|
    case 0x012: case 0x112: case 0x412: case 0x512:    /* WSAD */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22))
            gen_op_iwmmxt_sadw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_sadb_M0_wRn(rd1);
        if (!(insn & (1 << 20)))
            gen_op_iwmmxt_addl_M0_wRn(wrd);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x010: case 0x110: case 0x210: case 0x310:    /* WMUL */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_mulshw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mulslw_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_muluhw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_mululw_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x410: case 0x510: case 0x610: case 0x710:    /* WMAC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 21))
            gen_op_iwmmxt_macsw_M0_wRn(rd1);
        else
            gen_op_iwmmxt_macuw_M0_wRn(rd1);
        if (!(insn & (1 << 20))) {
            iwmmxt_load_reg(cpu_V1, wrd);
            tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x006: case 0x406: case 0x806: case 0xc06:    /* WCMPEQ */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
            break;
        case 1:
            gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
            break;
        case 2:
            gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x800: case 0x900: case 0xc00: case 0xd00:    /* WAVG2 */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        if (insn & (1 << 22)) {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgw1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgw0_M0_wRn(rd1);
        } else {
            if (insn & (1 << 20))
                gen_op_iwmmxt_avgb1_M0_wRn(rd1);
            else
                gen_op_iwmmxt_avgb0_M0_wRn(rd1);
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x802: case 0x902: case 0xa02: case 0xb02:    /* WALIGNR */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
        tcg_gen_andi_i32(tmp, tmp, 7);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x601: case 0x605: case 0x609: case 0x60d:    /* TINSR */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        switch ((insn >> 6) & 3) {
        case 0:
            tmp2 = tcg_const_i32(0xff);
            tmp3 = tcg_const_i32((insn & 7) << 3);
            break;
        case 1:
            tmp2 = tcg_const_i32(0xffff);
            tmp3 = tcg_const_i32((insn & 3) << 4);
            break;
        case 2:
            tmp2 = tcg_const_i32(0xffffffff);
            tmp3 = tcg_const_i32((insn & 1) << 5);
            break;
        default:
            tmp2 = NULL;
            tmp3 = NULL;
        }
        gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
        tcg_temp_free_i32(tmp3);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x107: case 0x507: case 0x907: case 0xd07:    /* TEXTRM */
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        if (rd == 15 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext8s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xff);
            }
            break;
        case 1:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            if (insn & 8) {
                tcg_gen_ext16s_i32(tmp, tmp);
            } else {
                tcg_gen_andi_i32(tmp, tmp, 0xffff);
            }
            break;
        case 2:
            tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
            tcg_gen_extrl_i64_i32(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x117: case 0x517: case 0x917: case 0xd17:    /* TEXTRC */
        if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        switch ((insn >> 22) & 3) {
        case 0:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
            break;
        case 1:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
            break;
        case 2:
            tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
            break;
        }
        tcg_gen_shli_i32(tmp, tmp, 28);
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp);
        break;
    case 0x401: case 0x405: case 0x409: case 0x40d:    /* TBCST */
        if (((insn >> 6) & 3) == 3)
            return 1;
        rd = (insn >> 12) & 0xf;
        wrd = (insn >> 16) & 0xf;
        tmp = load_reg(s, rd);
        switch ((insn >> 6) & 3) {
        case 0:
            gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
            break;
        case 1:
            gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
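    /*
     * TANDC: AND together the per-element SIMD condition nibbles held in
     * wCASF (by folding the register with shifted copies of itself) and
     * copy the resulting top nibble into the CPSR NZCV flags.
     */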
    case 0x113: case 0x513: case 0x913: case 0xd13:    /* TANDC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_and_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_and_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x01c: case 0x41c: case 0x81c: case 0xc1c:    /* WACC */
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x115: case 0x515: case 0x915: case 0xd15:    /* TORC */
        if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
            return 1;
        tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
        tmp2 = tcg_temp_new_i32();
        tcg_gen_mov_i32(tmp2, tmp);
        switch ((insn >> 22) & 3) {
        case 0:
            for (i = 0; i < 7; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 4);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 1:
            for (i = 0; i < 3; i ++) {
                tcg_gen_shli_i32(tmp2, tmp2, 8);
                tcg_gen_or_i32(tmp, tmp, tmp2);
            }
            break;
        case 2:
            tcg_gen_shli_i32(tmp2, tmp2, 16);
            tcg_gen_or_i32(tmp, tmp, tmp2);
            break;
        }
        gen_set_nzcv(tmp);
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        break;
    case 0x103: case 0x503: case 0x903: case 0xd03:    /* TMOVMSK */
        rd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 0:
            gen_helper_iwmmxt_msbb(tmp, cpu_M0);
            break;
        case 1:
            gen_helper_iwmmxt_msbw(tmp, cpu_M0);
            break;
        case 2:
            gen_helper_iwmmxt_msbl(tmp, cpu_M0);
            break;
        }
        store_reg(s, rd, tmp);
        break;
    case 0x106: case 0x306: case 0x506: case 0x706:    /* WCMPGT */
    case 0x906: case 0xb06: case 0xd06: case 0xf06:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00e: case 0x20e: case 0x40e: case 0x60e:    /* WUNPCKEL */
    case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsb_M0();
            else
                gen_op_iwmmxt_unpacklub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsw_M0();
            else
                gen_op_iwmmxt_unpackluw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpacklsl_M0();
            else
                gen_op_iwmmxt_unpacklul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x00c: case 0x20c: case 0x40c: case 0x60c:    /* WUNPCKEH */
    case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsb_M0();
            else
                gen_op_iwmmxt_unpackhub_M0();
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsw_M0();
            else
                gen_op_iwmmxt_unpackhuw_M0();
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_unpackhsl_M0();
            else
                gen_op_iwmmxt_unpackhul_M0();
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x204: case 0x604: case 0xa04: case 0xe04:    /* WSRL */
    case 0x214: case 0x614: case 0xa14: case 0xe14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x004: case 0x404: case 0x804: case 0xc04:    /* WSRA */
    case 0x014: case 0x414: case 0x814: case 0xc14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x104: case 0x504: case 0x904: case 0xd04:    /* WSLL */
    case 0x114: case 0x514: case 0x914: case 0xd14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
            tcg_temp_free_i32(tmp);
            return 1;
        }
        switch ((insn >> 22) & 3) {
        case 1:
            gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x304: case 0x704: case 0xb04: case 0xf04:    /* WROR */
    case 0x314: case 0x714: case 0xb14: case 0xf14:
        if (((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_temp_new_i32();
        switch ((insn >> 22) & 3) {
        case 1:
            if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 2:
            if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        case 3:
            if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
                tcg_temp_free_i32(tmp);
                return 1;
            }
            gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
            break;
        }
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x116: case 0x316: case 0x516: case 0x716:    /* WMIN */
    case 0x916: case 0xb16: case 0xd16: case 0xf16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_minsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_minul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x016: case 0x216: case 0x416: case 0x616:    /* WMAX */
    case 0x816: case 0xa16: case 0xc16: case 0xe16:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 0:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsb_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxub_M0_wRn(rd1);
            break;
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_maxsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_maxul_M0_wRn(rd1);
            break;
        case 3:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x002: case 0x102: case 0x202: case 0x302:    /* WALIGNI */
    case 0x402: case 0x502: case 0x602: case 0x702:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32((insn >> 20) & 3);
        iwmmxt_load_reg(cpu_V1, rd1);
        gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    case 0x01a: case 0x11a: case 0x21a: case 0x31a:    /* WSUB */
    case 0x41a: case 0x51a: case 0x61a: case 0x71a:
    case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
    case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_subnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_subub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_subsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_subnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_subuw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_subsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_subnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_subul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_subsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x01e: case 0x11e: case 0x21e: case 0x31e:    /* WSHUFH */
    case 0x41e: case 0x51e: case 0x61e: case 0x71e:
    case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
    case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
        gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x018: case 0x118: case 0x218: case 0x318:    /* WADD */
    case 0x418: case 0x518: case 0x618: case 0x718:
    case 0x818: case 0x918: case 0xa18: case 0xb18:
    case 0xc18: case 0xd18: case 0xe18: case 0xf18:
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 20) & 0xf) {
        case 0x0:
            gen_op_iwmmxt_addnb_M0_wRn(rd1);
            break;
        case 0x1:
            gen_op_iwmmxt_addub_M0_wRn(rd1);
            break;
        case 0x3:
            gen_op_iwmmxt_addsb_M0_wRn(rd1);
            break;
        case 0x4:
            gen_op_iwmmxt_addnw_M0_wRn(rd1);
            break;
        case 0x5:
            gen_op_iwmmxt_adduw_M0_wRn(rd1);
            break;
        case 0x7:
            gen_op_iwmmxt_addsw_M0_wRn(rd1);
            break;
        case 0x8:
            gen_op_iwmmxt_addnl_M0_wRn(rd1);
            break;
        case 0x9:
            gen_op_iwmmxt_addul_M0_wRn(rd1);
            break;
        case 0xb:
            gen_op_iwmmxt_addsl_M0_wRn(rd1);
            break;
        default:
            return 1;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
    case 0x008: case 0x108: case 0x208: case 0x308:    /* WPACK */
    case 0x408: case 0x508: case 0x608: case 0x708:
    case 0x808: case 0x908: case 0xa08: case 0xb08:
    case 0xc08: case 0xd08: case 0xe08: case 0xf08:
        if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
            return 1;
        wrd = (insn >> 12) & 0xf;
        rd0 = (insn >> 16) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        gen_op_iwmmxt_movq_M0_wRn(rd0);
        switch ((insn >> 22) & 3) {
        case 1:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsw_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuw_M0_wRn(rd1);
            break;
        case 2:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsl_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packul_M0_wRn(rd1);
            break;
        case 3:
            if (insn & (1 << 21))
                gen_op_iwmmxt_packsq_M0_wRn(rd1);
            else
                gen_op_iwmmxt_packuq_M0_wRn(rd1);
            break;
        }
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        gen_op_iwmmxt_set_cup();
        break;
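    /* TMIA, TMIAPH, TMIAxy: multiply rd0 by rd1 (as whole words, packed
     * halfwords, or one selected halfword of each) and accumulate into wRd. */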
    case 0x201: case 0x203: case 0x205: case 0x207:
    case 0x209: case 0x20b: case 0x20d: case 0x20f:
    case 0x211: case 0x213: case 0x215: case 0x217:
    case 0x219: case 0x21b: case 0x21d: case 0x21f:
        wrd = (insn >> 5) & 0xf;
        rd0 = (insn >> 12) & 0xf;
        rd1 = (insn >> 0) & 0xf;
        if (rd0 == 0xf || rd1 == 0xf)
            return 1;
        gen_op_iwmmxt_movq_M0_wRn(wrd);
        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* TMIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* TMIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc: case 0xd: case 0xe: case 0xf:         /* TMIAxy */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            tcg_temp_free_i32(tmp2);
            tcg_temp_free_i32(tmp);
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);
        gen_op_iwmmxt_movq_wRn_M0(wrd);
        gen_op_iwmmxt_set_mup();
        break;
    default:
        return 1;
    }

    return 0;
}

2011-04-28 23:20:38 +08:00
|
|
|
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;
    TCGv_i32 tmp, tmp2;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        if (acc != 0)
            return 1;

        tmp = load_reg(s, rd0);
        tmp2 = load_reg(s, rd1);
        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0x8:                                       /* MIAPH */
            gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            if (insn & (1 << 16))
                tcg_gen_shri_i32(tmp, tmp, 16);
            if (insn & (1 << 17))
                tcg_gen_shri_i32(tmp2, tmp2, 16);
            gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
            break;
        default:
            return 1;
        }
        tcg_temp_free_i32(tmp2);
        tcg_temp_free_i32(tmp);

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

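        /*
         * MRA/MAR move the 40-bit internal accumulator to or from a pair
         * of core registers; on a read only bits [39:32] are significant
         * in the high register, hence the mask below.
         */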
        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            iwmmxt_load_reg(cpu_V0, acc);
            tcg_gen_extrl_i64_i32(cpu_R[rdlo], cpu_V0);
            tcg_gen_extrh_i64_i32(cpu_R[rdhi], cpu_V0);
            tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
        } else {                                        /* MAR */
            tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
            iwmmxt_store_reg(cpu_V0, acc);
        }
        return 0;
    }

    return 1;
}

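/*
 * Direct block chaining is only used when the destination lies on the
 * same guest page as this TB (checked against both the TB start and the
 * current instruction), so that changes to that page's mapping or
 * contents correctly invalidate the link; otherwise gen_goto_tb() falls
 * back to the slower lookup-and-jump path.
 */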
static inline bool use_goto_tb(DisasContext *s, target_ulong dest)
{
#ifndef CONFIG_USER_ONLY
    return (s->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) ||
           ((s->base.pc_next - 1) & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static void gen_goto_ptr(void)
{
    tcg_gen_lookup_and_goto_ptr();
}

/* This will end the TB but doesn't guarantee we'll return to
 * cpu_loop_exec. Any live exit_requests will be processed as we
 * enter the next TB.
 */
static void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
{
    if (use_goto_tb(s, dest)) {
        tcg_gen_goto_tb(n);
        gen_set_pc_im(s, dest);
        tcg_gen_exit_tb(s->base.tb, n);
    } else {
        gen_set_pc_im(s, dest);
        gen_goto_ptr();
    }
    s->base.is_jmp = DISAS_NORETURN;
}

/* Jump, specifying which TB number to use if we gen_goto_tb() */
static inline void gen_jmp_tb(DisasContext *s, uint32_t dest, int tbno)
{
    if (unlikely(is_singlestepping(s))) {
        /* An indirect jump so that we still trigger the debug exception.  */
        gen_set_pc_im(s, dest);
        s->base.is_jmp = DISAS_JUMP;
    } else {
        gen_goto_tb(s, tbno, dest);
    }
}

static inline void gen_jmp(DisasContext *s, uint32_t dest)
{
    gen_jmp_tb(s, dest, 0);
}

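/* Sign-extend the chosen 16-bit half of each operand (top half when x or y
 * is set, bottom half otherwise, as for SMULxy) and multiply them into t0. */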
static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);
    else
        gen_sxth(t0);
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}

/* Return the mask of PSR bits set by a MSR instruction.  */
static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
{
    uint32_t mask = 0;

    if (flags & (1 << 0)) {
        mask |= 0xff;
    }
    if (flags & (1 << 1)) {
        mask |= 0xff00;
    }
    if (flags & (1 << 2)) {
        mask |= 0xff0000;
    }
    if (flags & (1 << 3)) {
        mask |= 0xff000000;
    }

    /* Mask out undefined and reserved bits.  */
    mask &= aarch32_cpsr_valid_mask(s->features, s->isar);

    /* Mask out execution state.  */
    if (!spsr) {
        mask &= ~CPSR_EXEC;
    }

    /* Mask out privileged bits.  */
    if (IS_USER(s)) {
        mask &= CPSR_USER;
    }
    return mask;
}

/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
{
    TCGv_i32 tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, spsr);
    } else {
        gen_set_cpsr(t0, mask);
    }
    tcg_temp_free_i32(t0);
    gen_lookup_tb(s);
    return 0;
}

/* Returns nonzero if access to the PSR is not permitted.  */
static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_movi_i32(tmp, val);
    return gen_set_psr(s, mask, spsr, tmp);
}

static bool msr_banked_access_decode(DisasContext *s, int r, int sysm, int rn,
                                     int *tgtmode, int *regno)
{
    /* Decode the r and sysm fields of MSR/MRS banked accesses into
     * the target mode and register number, and identify the various
     * unpredictable cases.
     * MSR (banked) and MRS (banked) are CONSTRAINED UNPREDICTABLE if:
     *  + executed in user mode
     *  + using R15 as the src/dest register
     *  + accessing an unimplemented register
     *  + accessing a register that's inaccessible at current PL/security state*
     *  + accessing a register that you could access with a different insn
     * We choose to UNDEF in all these cases.
     * Since we don't know which of the various AArch32 modes we are in
     * we have to defer some checks to runtime.
     * Accesses to Monitor mode registers from Secure EL1 (which implies
     * that EL3 is AArch64) must trap to EL3.
     *
     * If the access checks fail this function will emit code to take
     * an exception and return false. Otherwise it will return true,
     * and set *tgtmode and *regno appropriately.
     */
    int exc_target = default_exception_el(s);

    /* These instructions are present only in ARMv8, or in ARMv7 with the
     * Virtualization Extensions.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_EL2)) {
        goto undef;
    }

    if (IS_USER(s) || rn == 15) {
        goto undef;
    }

    /* The table in the v8 ARM ARM section F5.2.3 describes the encoding
     * of registers into (r, sysm).
     */
    if (r) {
        /* SPSRs for other modes */
        switch (sysm) {
        case 0xe:                                    /* SPSR_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            break;
        case 0x10:                                   /* SPSR_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            break;
        case 0x12:                                   /* SPSR_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            break;
        case 0x14:                                   /* SPSR_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            break;
        case 0x16:                                   /* SPSR_und */
            *tgtmode = ARM_CPU_MODE_UND;
            break;
        case 0x1c:                                   /* SPSR_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            break;
        case 0x1e:                                   /* SPSR_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            break;
        default:                                     /* unallocated */
            goto undef;
        }
        /* We arbitrarily assign SPSR a register number of 16. */
        *regno = 16;
    } else {
        /* general purpose registers for other modes */
        switch (sysm) {
        case 0x0 ... 0x6:   /* 0b00xxx : r8_usr ... r14_usr */
            *tgtmode = ARM_CPU_MODE_USR;
            *regno = sysm + 8;
            break;
        case 0x8 ... 0xe:   /* 0b01xxx : r8_fiq ... r14_fiq */
            *tgtmode = ARM_CPU_MODE_FIQ;
            *regno = sysm;
            break;
        case 0x10 ... 0x11: /* 0b1000x : r14_irq, r13_irq */
            *tgtmode = ARM_CPU_MODE_IRQ;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x12 ... 0x13: /* 0b1001x : r14_svc, r13_svc */
            *tgtmode = ARM_CPU_MODE_SVC;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x14 ... 0x15: /* 0b1010x : r14_abt, r13_abt */
            *tgtmode = ARM_CPU_MODE_ABT;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x16 ... 0x17: /* 0b1011x : r14_und, r13_und */
            *tgtmode = ARM_CPU_MODE_UND;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1c ... 0x1d: /* 0b1110x : r14_mon, r13_mon */
            *tgtmode = ARM_CPU_MODE_MON;
            *regno = sysm & 1 ? 13 : 14;
            break;
        case 0x1e ... 0x1f: /* 0b1111x : elr_hyp, r13_hyp */
            *tgtmode = ARM_CPU_MODE_HYP;
            /* Arbitrarily pick 17 for ELR_Hyp (which is not a banked LR!) */
            *regno = sysm & 1 ? 13 : 17;
            break;
        default:                                     /* unallocated */
            goto undef;
        }
    }

    /* Catch the 'accessing inaccessible register' cases we can detect
     * at translate time.
     */
    switch (*tgtmode) {
    case ARM_CPU_MODE_MON:
        if (!arm_dc_feature(s, ARM_FEATURE_EL3) || s->ns) {
            goto undef;
        }
        if (s->current_el == 1) {
            /* If we're in Secure EL1 (which implies that EL3 is AArch64)
             * then accesses to Mon registers trap to EL3
             */
            exc_target = 3;
            goto undef;
        }
        break;
    case ARM_CPU_MODE_HYP:
        /*
         * SPSR_hyp and r13_hyp can only be accessed from Monitor mode
         * (and so we can forbid accesses from EL2 or below). elr_hyp
         * can be accessed also from Hyp mode, so forbid accesses from
         * EL0 or EL1.
         */
        if (!arm_dc_feature(s, ARM_FEATURE_EL2) || s->current_el < 2 ||
            (s->current_el < 3 && *regno != 17)) {
            goto undef;
        }
        break;
    default:
        break;
    }

    return true;

undef:
    /* If we get here then some access check did not pass */
    gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                       syn_uncategorized(), exc_target);
    return false;
}

static void gen_msr_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because msr_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = load_reg(s, rn);
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_msr_banked(cpu_env, tcg_reg, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    tcg_temp_free_i32(tcg_reg);
    s->base.is_jmp = DISAS_UPDATE_EXIT;
}

static void gen_mrs_banked(DisasContext *s, int r, int sysm, int rn)
{
    TCGv_i32 tcg_reg, tcg_tgtmode, tcg_regno;
    int tgtmode = 0, regno = 0;

    if (!msr_banked_access_decode(s, r, sysm, rn, &tgtmode, &regno)) {
        return;
    }

    /* Sync state because mrs_banked() can raise exceptions */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    tcg_reg = tcg_temp_new_i32();
    tcg_tgtmode = tcg_const_i32(tgtmode);
    tcg_regno = tcg_const_i32(regno);
    gen_helper_mrs_banked(tcg_reg, cpu_env, tcg_tgtmode, tcg_regno);
    tcg_temp_free_i32(tcg_tgtmode);
    tcg_temp_free_i32(tcg_regno);
    store_reg(s, rn, tcg_reg);
    s->base.is_jmp = DISAS_UPDATE_EXIT;
}

/* Store value to PC as for an exception return (ie don't
 * mask bits). The subsequent call to gen_helper_cpsr_write_eret()
 * will do the masking based on the new value of the Thumb bit.
 */
static void store_pc_exc_ret(DisasContext *s, TCGv_i32 pc)
{
    tcg_gen_mov_i32(cpu_R[15], pc);
    tcg_temp_free_i32(pc);
}

/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    store_pc_exc_ret(s, pc);
    /* The cpsr_write_eret helper will mask the low bits of PC
     * appropriately depending on the new Thumb bit, so it must
     * be called after storing the new PC.
     */
    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_cpsr_write_eret(cpu_env, cpsr);
    tcg_temp_free_i32(cpsr);
    /* Must exit loop to check un-masked IRQs */
    s->base.is_jmp = DISAS_EXIT;
}

/* Generate an old-style exception return. Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
{
    gen_rfe(s, pc, load_cpu_field(spsr));
}

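/*
 * Helper for the expanders below: expand a three-operand gvec helper that
 * also needs to update the saturation flag, by passing a pointer to
 * vfp.qc in env as the extra pointer argument of the out-of-line helper.
 */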
static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
                            uint32_t opr_sz, uint32_t max_sz,
                            gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr qc_ptr = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(qc_ptr, cpu_env, offsetof(CPUARMState, vfp.qc));
    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
                       opr_sz, max_sz, 0, fn);
    tcg_temp_free_ptr(qc_ptr);
}

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static gen_helper_gvec_3_ptr * const fns[2] = {
        gen_helper_gvec_qrdmlah_s16, gen_helper_gvec_qrdmlah_s32
    };
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}

void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
{
    static gen_helper_gvec_3_ptr * const fns[2] = {
        gen_helper_gvec_qrdmlsh_s16, gen_helper_gvec_qrdmlsh_s32
    };
    tcg_debug_assert(vece >= 1 && vece <= 2);
    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
}

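/*
 * Expand the compare-against-zero operations (ceq0, cle0, cge0, clt0,
 * cgt0): each element of the result is set to all ones when the condition
 * holds (setcond yields 0 or 1, which is then negated to 0 or -1) and to
 * zero otherwise; the vector form uses tcg_gen_cmp_vec directly.
 */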
#define GEN_CMP0(NAME, COND)                                            \
    static void gen_##NAME##0_i32(TCGv_i32 d, TCGv_i32 a)               \
    {                                                                   \
        tcg_gen_setcondi_i32(COND, d, a, 0);                            \
        tcg_gen_neg_i32(d, d);                                          \
    }                                                                   \
    static void gen_##NAME##0_i64(TCGv_i64 d, TCGv_i64 a)               \
    {                                                                   \
        tcg_gen_setcondi_i64(COND, d, a, 0);                            \
        tcg_gen_neg_i64(d, d);                                          \
    }                                                                   \
    static void gen_##NAME##0_vec(unsigned vece, TCGv_vec d, TCGv_vec a) \
    {                                                                   \
        TCGv_vec zero = tcg_const_zeros_vec_matching(d);                \
        tcg_gen_cmp_vec(COND, vece, d, a, zero);                        \
        tcg_temp_free_vec(zero);                                        \
    }                                                                   \
    void gen_gvec_##NAME##0(unsigned vece, uint32_t d, uint32_t m,      \
                            uint32_t opr_sz, uint32_t max_sz)           \
    {                                                                   \
        const GVecGen2 op[4] = {                                        \
            { .fno = gen_helper_gvec_##NAME##0_b,                       \
              .fniv = gen_##NAME##0_vec,                                \
              .opt_opc = vecop_list_cmp,                                \
              .vece = MO_8 },                                           \
            { .fno = gen_helper_gvec_##NAME##0_h,                       \
              .fniv = gen_##NAME##0_vec,                                \
              .opt_opc = vecop_list_cmp,                                \
              .vece = MO_16 },                                          \
            { .fni4 = gen_##NAME##0_i32,                                \
              .fniv = gen_##NAME##0_vec,                                \
              .opt_opc = vecop_list_cmp,                                \
              .vece = MO_32 },                                          \
            { .fni8 = gen_##NAME##0_i64,                                \
              .fniv = gen_##NAME##0_vec,                                \
              .opt_opc = vecop_list_cmp,                                \
              .prefer_i64 = TCG_TARGET_REG_BITS == 64,                  \
              .vece = MO_64 },                                          \
        };                                                              \
        tcg_gen_gvec_2(d, m, opr_sz, max_sz, &op[vece]);                \
    }

static const TCGOpcode vecop_list_cmp[] = {
    INDEX_op_cmp_vec, 0
};

GEN_CMP0(ceq, TCG_COND_EQ)
GEN_CMP0(cle, TCG_COND_LE)
GEN_CMP0(cge, TCG_COND_GE)
GEN_CMP0(clt, TCG_COND_LT)
GEN_CMP0(cgt, TCG_COND_GT)

#undef GEN_CMP0

static void gen_ssra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_ssra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_sar16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_ssra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_sari_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_ssra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_sari_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_ssra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_sari_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_sari_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_ssra8_i64,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_ssra16_i64,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_ssra32_i32,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_ssra64_i64,
          .fniv = gen_ssra_vec,
          .fno = gen_helper_gvec_ssra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .opt_opc = vecop_list,
          .load_dest = true,
          .vece = MO_64 },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /*
     * Shifts larger than the element size are architecturally valid.
     * Signed results in all sign bits.
     */
    shift = MIN(shift, (8 << vece) - 1);
    tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
}

static void gen_usra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr8i_i64(a, a, shift);
    tcg_gen_vec_add8_i64(d, d, a);
}

static void gen_usra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_vec_shr16i_i64(a, a, shift);
    tcg_gen_vec_add16_i64(d, d, a);
}

static void gen_usra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
{
    tcg_gen_shri_i32(a, a, shift);
    tcg_gen_add_i32(d, d, a);
}

static void gen_usra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
{
    tcg_gen_shri_i64(a, a, shift);
    tcg_gen_add_i64(d, d, a);
}

static void gen_usra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
{
    tcg_gen_shri_vec(vece, a, a, sh);
    tcg_gen_add_vec(vece, d, d, a);
}

void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop_list[] = {
        INDEX_op_shri_vec, INDEX_op_add_vec, 0
    };
    static const GVecGen2i ops[4] = {
        { .fni8 = gen_usra8_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_b,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_8, },
        { .fni8 = gen_usra16_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_h,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_16, },
        { .fni4 = gen_usra32_i32,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_s,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_32, },
        { .fni8 = gen_usra64_i64,
          .fniv = gen_usra_vec,
          .fno = gen_helper_gvec_usra_d,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .load_dest = true,
          .opt_opc = vecop_list,
          .vece = MO_64, },
    };

    /* tszimm encoding produces immediates in the range [1..esize]. */
    tcg_debug_assert(shift > 0);
    tcg_debug_assert(shift <= (8 << vece));

    /*
     * Shifts larger than the element size are architecturally valid.
     * Unsigned results in all zeros as input to accumulate: nop.
     */
    if (shift < (8 << vece)) {
        tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
    } else {
        /* Nop, but we do need to clear the tail. */
        tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
    }
}

/*
 * Shift one less than the requested amount, and the low bit is
 * the rounding bit.  For the 8 and 16-bit operations, because we
 * mask the low bit, we can perform a normal integer shift instead
 * of a vector shift.
 */
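/*
 * Worked example (illustrative): for sh == 2 and a == 7 (0b111),
 * t = (7 >> 1) & 1 = 1 and d = (7 >> 2) + 1 = 2, matching the rounded
 * result (7 + (1 << (2 - 1))) >> 2 = 2.
 */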
|
|
|
|
static void gen_srshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, sh - 1);
|
|
|
|
tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
|
|
|
|
tcg_gen_vec_sar8i_i64(d, a, sh);
|
|
|
|
tcg_gen_vec_add8_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, sh - 1);
|
|
|
|
tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
|
|
|
|
tcg_gen_vec_sar16i_i64(d, a, sh);
|
|
|
|
tcg_gen_vec_add16_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
tcg_gen_extract_i32(t, a, sh - 1, 1);
|
|
|
|
tcg_gen_sari_i32(d, a, sh);
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_extract_i64(t, a, sh - 1, 1);
|
|
|
|
tcg_gen_sari_i64(d, a, sh);
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
TCGv_vec ones = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
tcg_gen_shri_vec(vece, t, a, sh - 1);
|
|
|
|
tcg_gen_dupi_vec(vece, ones, 1);
|
|
|
|
tcg_gen_and_vec(vece, t, t, ones);
|
|
|
|
tcg_gen_sari_vec(vece, d, a, sh);
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
tcg_temp_free_vec(ones);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_srshr8_i64,
|
|
|
|
.fniv = gen_srshr_vec,
|
|
|
|
.fno = gen_helper_gvec_srshr_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_srshr16_i64,
|
|
|
|
.fniv = gen_srshr_vec,
|
|
|
|
.fno = gen_helper_gvec_srshr_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_srshr32_i32,
|
|
|
|
.fniv = gen_srshr_vec,
|
|
|
|
.fno = gen_helper_gvec_srshr_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_srshr64_i64,
|
|
|
|
.fniv = gen_srshr_vec,
|
|
|
|
.fno = gen_helper_gvec_srshr_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [1..esize] */
|
|
|
|
tcg_debug_assert(shift > 0);
|
|
|
|
tcg_debug_assert(shift <= (8 << vece));
|
|
|
|
|
|
|
|
if (shift == (8 << vece)) {
|
|
|
|
/*
|
|
|
|
* Shifts larger than the element size are architecturally valid.
|
|
|
|
* Signed results in all sign bits. With rounding, this produces
|
|
|
|
* (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
|
|
|
|
* I.e. always zero.
|
|
|
|
*/
|
|
|
|
tcg_gen_gvec_dup_imm(vece, rd_ofs, opr_sz, max_sz, 0);
|
|
|
|
} else {
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
}
|
|
|
|
}
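/*
 * Concrete instance of the shift == esize case handled above (values
 * chosen arbitrarily): for MO_8 with shift == 8, any lane x lies in
 * [-128, 127], so the rounded intermediate x + 128 lies in [0, 255]
 * and (x + 128) >> 8 == 0.  E.g. x == -100 gives 28 >> 8 == 0 and
 * x == 100 gives 228 >> 8 == 0, hence the unconditional dup of zero.
 */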
|
|
|
|
|
|
|
|
static void gen_srsra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_srshr8_i64(t, a, sh);
|
|
|
|
tcg_gen_vec_add8_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srsra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_srshr16_i64(t, a, sh);
|
|
|
|
tcg_gen_vec_add16_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srsra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_srshr32_i32(t, a, sh);
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srsra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_srshr64_i64(t, a, sh);
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_srsra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
gen_srshr_vec(vece, t, a, sh);
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_shri_vec, INDEX_op_sari_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_srsra8_i64,
|
|
|
|
.fniv = gen_srsra_vec,
|
|
|
|
.fno = gen_helper_gvec_srsra_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_srsra16_i64,
|
|
|
|
.fniv = gen_srsra_vec,
|
|
|
|
.fno = gen_helper_gvec_srsra_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_srsra32_i32,
|
|
|
|
.fniv = gen_srsra_vec,
|
|
|
|
.fno = gen_helper_gvec_srsra_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_srsra64_i64,
|
|
|
|
.fniv = gen_srsra_vec,
|
|
|
|
.fno = gen_helper_gvec_srsra_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [1..esize] */
|
|
|
|
tcg_debug_assert(shift > 0);
|
|
|
|
tcg_debug_assert(shift <= (8 << vece));
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Shifts larger than the element size are architecturally valid.
|
|
|
|
* Signed results in all sign bits. With rounding, this produces
|
|
|
|
* (-1 + 1) >> 1 == 0, or (0 + 1) >> 1 == 0.
|
|
|
|
* I.e. always zero. With accumulation, this leaves D unchanged.
|
|
|
|
*/
|
|
|
|
if (shift == (8 << vece)) {
|
|
|
|
/* Nop, but we do need to clear the tail. */
|
|
|
|
tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
|
|
|
|
} else {
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_urshr8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, sh - 1);
|
|
|
|
tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
|
|
|
|
tcg_gen_vec_shr8i_i64(d, a, sh);
|
|
|
|
tcg_gen_vec_add8_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_urshr16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, sh - 1);
|
|
|
|
tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
|
|
|
|
tcg_gen_vec_shr16i_i64(d, a, sh);
|
|
|
|
tcg_gen_vec_add16_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_urshr32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
tcg_gen_extract_i32(t, a, sh - 1, 1);
|
|
|
|
tcg_gen_shri_i32(d, a, sh);
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_urshr64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_extract_i64(t, a, sh - 1, 1);
|
|
|
|
tcg_gen_shri_i64(d, a, sh);
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_urshr_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t shift)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
TCGv_vec ones = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
tcg_gen_shri_vec(vece, t, a, shift - 1);
|
|
|
|
tcg_gen_dupi_vec(vece, ones, 1);
|
|
|
|
tcg_gen_and_vec(vece, t, t, ones);
|
|
|
|
tcg_gen_shri_vec(vece, d, a, shift);
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
tcg_temp_free_vec(ones);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_shri_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_urshr8_i64,
|
|
|
|
.fniv = gen_urshr_vec,
|
|
|
|
.fno = gen_helper_gvec_urshr_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_urshr16_i64,
|
|
|
|
.fniv = gen_urshr_vec,
|
|
|
|
.fno = gen_helper_gvec_urshr_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_urshr32_i32,
|
|
|
|
.fniv = gen_urshr_vec,
|
|
|
|
.fno = gen_helper_gvec_urshr_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_urshr64_i64,
|
|
|
|
.fniv = gen_urshr_vec,
|
|
|
|
.fno = gen_helper_gvec_urshr_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [1..esize] */
|
|
|
|
tcg_debug_assert(shift > 0);
|
|
|
|
tcg_debug_assert(shift <= (8 << vece));
|
|
|
|
|
|
|
|
if (shift == (8 << vece)) {
|
|
|
|
/*
|
|
|
|
* Shifts larger than the element size are architecturally valid.
|
|
|
|
* Unsigned results in zero. With rounding, this produces a
|
|
|
|
* copy of the most significant bit.
|
|
|
|
*/
|
|
|
|
tcg_gen_gvec_shri(vece, rd_ofs, rm_ofs, shift - 1, opr_sz, max_sz);
|
|
|
|
} else {
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
}
|
|
|
|
}
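/*
 * Concrete instance of the shift == esize case handled above (values
 * chosen arbitrarily): for MO_8 with shift == 8, the rounded result
 * (x + 128) >> 8 is 1 exactly when x >= 128, i.e. it is a copy of the
 * most significant bit.  E.g. x == 200 gives 328 >> 8 == 1 == bit 7 of
 * 200, while x == 100 gives 228 >> 8 == 0.  A plain gvec shift by
 * esize - 1 therefore produces the correct result directly.
 */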
|
|
|
|
|
|
|
|
static void gen_ursra8_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
if (sh == 8) {
|
|
|
|
tcg_gen_vec_shr8i_i64(t, a, 7);
|
|
|
|
} else {
|
|
|
|
gen_urshr8_i64(t, a, sh);
|
|
|
|
}
|
|
|
|
tcg_gen_vec_add8_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_ursra16_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
if (sh == 16) {
|
|
|
|
tcg_gen_vec_shr16i_i64(t, a, 15);
|
|
|
|
} else {
|
|
|
|
gen_urshr16_i64(t, a, sh);
|
|
|
|
}
|
|
|
|
tcg_gen_vec_add16_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_ursra32_i32(TCGv_i32 d, TCGv_i32 a, int32_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
if (sh == 32) {
|
|
|
|
tcg_gen_shri_i32(t, a, 31);
|
|
|
|
} else {
|
|
|
|
gen_urshr32_i32(t, a, sh);
|
|
|
|
}
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_ursra64_i64(TCGv_i64 d, TCGv_i64 a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
if (sh == 64) {
|
|
|
|
tcg_gen_shri_i64(t, a, 63);
|
|
|
|
} else {
|
|
|
|
gen_urshr64_i64(t, a, sh);
|
|
|
|
}
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_ursra_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
if (sh == (8 << vece)) {
|
|
|
|
tcg_gen_shri_vec(vece, t, a, sh - 1);
|
|
|
|
} else {
|
|
|
|
gen_urshr_vec(vece, t, a, sh);
|
|
|
|
}
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_shri_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_ursra8_i64,
|
|
|
|
.fniv = gen_ursra_vec,
|
|
|
|
.fno = gen_helper_gvec_ursra_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_ursra16_i64,
|
|
|
|
.fniv = gen_ursra_vec,
|
|
|
|
.fno = gen_helper_gvec_ursra_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_ursra32_i32,
|
|
|
|
.fniv = gen_ursra_vec,
|
|
|
|
.fno = gen_helper_gvec_ursra_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_ursra64_i64,
|
|
|
|
.fniv = gen_ursra_vec,
|
|
|
|
.fno = gen_helper_gvec_ursra_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [1..esize] */
|
|
|
|
tcg_debug_assert(shift > 0);
|
|
|
|
tcg_debug_assert(shift <= (8 << vece));
|
|
|
|
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
}
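/*
 * Note on the structure above (a reading of the code, lane values
 * arbitrary): unlike gen_gvec_urshr there is no esize special case
 * here, because the per-lane expanders handle sh == esize themselves
 * by adding the most significant bit of the source.  For MO_8 a lane
 * of 0x80 adds 1 to the accumulator and a lane of 0x7f adds 0, which
 * matches the rounded shift by esize.
 */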
|
|
|
|
|
2018-10-24 14:50:19 +08:00
|
|
|
static void gen_shr8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
uint64_t mask = dup_const(MO_8, 0xff >> shift);
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, shift);
|
|
|
|
tcg_gen_andi_i64(t, t, mask);
|
|
|
|
tcg_gen_andi_i64(d, d, ~mask);
|
|
|
|
tcg_gen_or_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shr16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
uint64_t mask = dup_const(MO_16, 0xffff >> shift);
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shri_i64(t, a, shift);
|
|
|
|
tcg_gen_andi_i64(t, t, mask);
|
|
|
|
tcg_gen_andi_i64(d, d, ~mask);
|
|
|
|
tcg_gen_or_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shr32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
|
|
|
|
{
|
|
|
|
tcg_gen_shri_i32(a, a, shift);
|
|
|
|
tcg_gen_deposit_i32(d, d, a, 0, 32 - shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shr64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
tcg_gen_shri_i64(a, a, shift);
|
|
|
|
tcg_gen_deposit_i64(d, d, a, 0, 64 - shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shr_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
|
|
|
|
{
|
2020-05-14 00:32:32 +08:00
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
TCGv_vec m = tcg_temp_new_vec_matching(d);
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK((8 << vece) - sh, sh));
|
|
|
|
tcg_gen_shri_vec(vece, t, a, sh);
|
|
|
|
tcg_gen_and_vec(vece, d, d, m);
|
|
|
|
tcg_gen_or_vec(vece, d, d, t);
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
tcg_temp_free_vec(m);
|
2018-10-24 14:50:19 +08:00
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
|
|
|
|
const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_shr8_ins_i64,
|
|
|
|
.fniv = gen_shr_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sri_b,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_shr16_ins_i64,
|
|
|
|
.fniv = gen_shr_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sri_h,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_shr32_ins_i32,
|
|
|
|
.fniv = gen_shr_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sri_s,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_shr64_ins_i64,
|
|
|
|
.fniv = gen_shr_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sri_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [1..esize]. */
|
|
|
|
tcg_debug_assert(shift > 0);
|
|
|
|
tcg_debug_assert(shift <= (8 << vece));
|
2019-03-17 08:27:29 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
/* Shift of esize leaves destination unchanged. */
|
|
|
|
if (shift < (8 << vece)) {
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
} else {
|
|
|
|
/* Nop, but we do need to clear the tail. */
|
|
|
|
tcg_gen_gvec_mov(vece, rd_ofs, rd_ofs, opr_sz, max_sz);
|
|
|
|
}
|
|
|
|
}
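/*
 * Worked SRI example for the lane operation above (values chosen
 * arbitrarily): the low esize - shift bits of the destination are
 * replaced by a >> shift and the top shift bits are preserved.
 * For MO_8 with shift == 3, d == 0xa5, a == 0xf0:
 *     a >> 3           == 0x1e
 *     d & ~(0xff >> 3) == 0xa0
 *     result           == 0xbe
 */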
|
2018-10-24 14:50:19 +08:00
|
|
|
|
|
|
|
static void gen_shl8_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
uint64_t mask = dup_const(MO_8, 0xff << shift);
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shli_i64(t, a, shift);
|
|
|
|
tcg_gen_andi_i64(t, t, mask);
|
|
|
|
tcg_gen_andi_i64(d, d, ~mask);
|
|
|
|
tcg_gen_or_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shl16_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
uint64_t mask = dup_const(MO_16, 0xffff << shift);
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_shli_i64(t, a, shift);
|
|
|
|
tcg_gen_andi_i64(t, t, mask);
|
|
|
|
tcg_gen_andi_i64(d, d, ~mask);
|
|
|
|
tcg_gen_or_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shl32_ins_i32(TCGv_i32 d, TCGv_i32 a, int32_t shift)
|
|
|
|
{
|
|
|
|
tcg_gen_deposit_i32(d, d, a, shift, 32 - shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shl64_ins_i64(TCGv_i64 d, TCGv_i64 a, int64_t shift)
|
|
|
|
{
|
|
|
|
tcg_gen_deposit_i64(d, d, a, shift, 64 - shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_shl_ins_vec(unsigned vece, TCGv_vec d, TCGv_vec a, int64_t sh)
|
|
|
|
{
|
2020-05-14 00:32:32 +08:00
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
TCGv_vec m = tcg_temp_new_vec_matching(d);
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
tcg_gen_shli_vec(vece, t, a, sh);
|
|
|
|
tcg_gen_dupi_vec(vece, m, MAKE_64BIT_MASK(0, sh));
|
|
|
|
tcg_gen_and_vec(vece, d, d, m);
|
|
|
|
tcg_gen_or_vec(vece, d, d, t);
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
tcg_temp_free_vec(m);
|
2018-10-24 14:50:19 +08:00
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
|
|
|
|
int64_t shift, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
|
|
|
|
const GVecGen2i ops[4] = {
|
|
|
|
{ .fni8 = gen_shl8_ins_i64,
|
|
|
|
.fniv = gen_shl_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sli_b,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni8 = gen_shl16_ins_i64,
|
|
|
|
.fniv = gen_shl_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sli_h,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_shl32_ins_i32,
|
|
|
|
.fniv = gen_shl_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sli_s,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_shl64_ins_i64,
|
|
|
|
.fniv = gen_shl_ins_vec,
|
|
|
|
.fno = gen_helper_gvec_sli_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
|
|
|
|
/* tszimm encoding produces immediates in the range [0..esize-1]. */
|
|
|
|
tcg_debug_assert(shift >= 0);
|
|
|
|
tcg_debug_assert(shift < (8 << vece));
|
2019-03-17 08:27:29 +08:00
|
|
|
|
2020-05-14 00:32:32 +08:00
|
|
|
if (shift == 0) {
|
|
|
|
tcg_gen_gvec_mov(vece, rd_ofs, rm_ofs, opr_sz, max_sz);
|
|
|
|
} else {
|
|
|
|
tcg_gen_gvec_2i(rd_ofs, rm_ofs, opr_sz, max_sz, shift, &ops[vece]);
|
|
|
|
}
|
|
|
|
}
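/*
 * Worked SLI example for the lane operation above (values chosen
 * arbitrarily): the top esize - shift bits of the destination are
 * replaced by a << shift and the low shift bits are preserved.
 * For MO_8 with shift == 3, d == 0xa5, a == 0x0f:
 *     (a << 3) & 0xf8 == 0x78
 *     d & 0x07        == 0x05
 *     result          == 0x7d
 * A shift of 0 would insert the whole element, which is why the
 * function above lowers that case to a plain vector move.
 */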
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2018-10-24 14:50:19 +08:00
|
|
|
static void gen_mla8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_helper_neon_mul_u8(a, a, b);
|
|
|
|
gen_helper_neon_add_u8(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mls8_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_helper_neon_mul_u8(a, a, b);
|
|
|
|
gen_helper_neon_sub_u8(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mla16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_helper_neon_mul_u16(a, a, b);
|
|
|
|
gen_helper_neon_add_u16(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mls16_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_helper_neon_mul_u16(a, a, b);
|
|
|
|
gen_helper_neon_sub_u16(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mla32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_i32(a, a, b);
|
|
|
|
tcg_gen_add_i32(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mls32_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_i32(a, a, b);
|
|
|
|
tcg_gen_sub_i32(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mla64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_i64(a, a, b);
|
|
|
|
tcg_gen_add_i64(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mls64_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_i64(a, a, b);
|
|
|
|
tcg_gen_sub_i64(d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mla_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_vec(vece, a, a, b);
|
|
|
|
tcg_gen_add_vec(vece, d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_mls_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
tcg_gen_mul_vec(vece, a, a, b);
|
|
|
|
tcg_gen_sub_vec(vece, d, d, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Note that while NEON does not support VMLA and VMLS as 64-bit ops,
|
|
|
|
* these tables are shared with AArch64 which does support them.
|
|
|
|
*/
|
2020-05-14 00:32:36 +08:00
|
|
|
void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_mul_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fni4 = gen_mla8_i32,
|
|
|
|
.fniv = gen_mla_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni4 = gen_mla16_i32,
|
|
|
|
.fniv = gen_mla_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_mla32_i32,
|
|
|
|
.fniv = gen_mla_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_mla64_i64,
|
|
|
|
.fniv = gen_mla_vec,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2019-03-17 08:27:29 +08:00
|
|
|
|
2020-05-14 00:32:36 +08:00
|
|
|
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_mul_vec, INDEX_op_sub_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fni4 = gen_mls8_i32,
|
|
|
|
.fniv = gen_mls_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni4 = gen_mls16_i32,
|
|
|
|
.fniv = gen_mls_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_mls32_i32,
|
|
|
|
.fniv = gen_mls_vec,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_mls64_i64,
|
|
|
|
.fniv = gen_mls_vec,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.load_dest = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2018-10-24 14:50:19 +08:00
|
|
|
|
2018-10-24 14:50:20 +08:00
|
|
|
/* CMTST : test is "if (X & Y != 0)". */
|
|
|
|
static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
tcg_gen_and_i32(d, a, b);
|
|
|
|
tcg_gen_setcondi_i32(TCG_COND_NE, d, d, 0);
|
|
|
|
tcg_gen_neg_i32(d, d);
|
|
|
|
}
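/*
 * Illustrative expansion of the scalar CMTST above (operands chosen
 * arbitrarily): setcond leaves 0 or 1 in d and the negation turns that
 * into 0 or all-ones.  For a == 0x0f0f and b == 0x1001, a & b == 0x0001
 * is non-zero, so the result is 0xffffffff; with no common bits set the
 * result is 0.
 */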
|
|
|
|
|
|
|
|
void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
tcg_gen_and_i64(d, a, b);
|
|
|
|
tcg_gen_setcondi_i64(TCG_COND_NE, d, d, 0);
|
|
|
|
tcg_gen_neg_i64(d, d);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
tcg_gen_and_vec(vece, d, a, b);
|
|
|
|
tcg_gen_dupi_vec(vece, a, 0);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:38 +08:00
|
|
|
void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = { INDEX_op_cmp_vec, 0 };
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fni4 = gen_helper_neon_tst_u8,
|
|
|
|
.fniv = gen_cmtst_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fni4 = gen_helper_neon_tst_u16,
|
|
|
|
.fniv = gen_cmtst_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_cmtst_i32,
|
|
|
|
.fniv = gen_cmtst_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_cmtst_i64,
|
|
|
|
.fniv = gen_cmtst_vec,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2018-10-24 14:50:20 +08:00
|
|
|
|
2020-02-17 05:42:29 +08:00
|
|
|
void gen_ushl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
|
|
|
|
{
|
|
|
|
TCGv_i32 lval = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 rval = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 lsh = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 rsh = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 zero = tcg_const_i32(0);
|
|
|
|
TCGv_i32 max = tcg_const_i32(32);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_ext8s_i32(lsh, shift);
|
|
|
|
tcg_gen_neg_i32(rsh, lsh);
|
|
|
|
tcg_gen_shl_i32(lval, src, lsh);
|
|
|
|
tcg_gen_shr_i32(rval, src, rsh);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LTU, dst, lsh, max, lval, zero);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LTU, dst, rsh, max, rval, dst);
|
|
|
|
|
|
|
|
tcg_temp_free_i32(lval);
|
|
|
|
tcg_temp_free_i32(rval);
|
|
|
|
tcg_temp_free_i32(lsh);
|
|
|
|
tcg_temp_free_i32(rsh);
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
tcg_temp_free_i32(max);
|
|
|
|
}
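/*
 * Worked example of the discard-after-the-fact scheme above (shift
 * bytes chosen arbitrarily):
 *   - shift byte 0xf8 (-8): lsh == -8 is not LTU 32, so the left-shift
 *     result is replaced by zero; rsh == 8 is LTU 32, so the final
 *     value is src >> 8, i.e. a logical right shift by 8.
 *   - shift byte 40: lsh == 40 and rsh == -40 both fail the LTU 32
 *     test, so the result is zero, as required for an out-of-range
 *     unsigned shift.
 */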
|
|
|
|
|
|
|
|
void gen_ushl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
|
|
|
|
{
|
|
|
|
TCGv_i64 lval = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 rval = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 lsh = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 rsh = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 zero = tcg_const_i64(0);
|
|
|
|
TCGv_i64 max = tcg_const_i64(64);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_ext8s_i64(lsh, shift);
|
|
|
|
tcg_gen_neg_i64(rsh, lsh);
|
|
|
|
tcg_gen_shl_i64(lval, src, lsh);
|
|
|
|
tcg_gen_shr_i64(rval, src, rsh);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LTU, dst, lsh, max, lval, zero);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LTU, dst, rsh, max, rval, dst);
|
|
|
|
|
|
|
|
tcg_temp_free_i64(lval);
|
|
|
|
tcg_temp_free_i64(rval);
|
|
|
|
tcg_temp_free_i64(lsh);
|
|
|
|
tcg_temp_free_i64(rsh);
|
|
|
|
tcg_temp_free_i64(zero);
|
|
|
|
tcg_temp_free_i64(max);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_ushl_vec(unsigned vece, TCGv_vec dst,
|
|
|
|
TCGv_vec src, TCGv_vec shift)
|
|
|
|
{
|
|
|
|
TCGv_vec lval = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec rval = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec msk, max;
|
|
|
|
|
|
|
|
tcg_gen_neg_vec(vece, rsh, shift);
|
|
|
|
if (vece == MO_8) {
|
|
|
|
tcg_gen_mov_vec(lsh, shift);
|
|
|
|
} else {
|
|
|
|
msk = tcg_temp_new_vec_matching(dst);
|
|
|
|
tcg_gen_dupi_vec(vece, msk, 0xff);
|
|
|
|
tcg_gen_and_vec(vece, lsh, shift, msk);
|
|
|
|
tcg_gen_and_vec(vece, rsh, rsh, msk);
|
|
|
|
tcg_temp_free_vec(msk);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_shlv_vec(vece, lval, src, lsh);
|
|
|
|
tcg_gen_shrv_vec(vece, rval, src, rsh);
|
|
|
|
|
|
|
|
max = tcg_temp_new_vec_matching(dst);
|
|
|
|
tcg_gen_dupi_vec(vece, max, 8 << vece);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* The choice of LT (signed) and GEU (unsigned) is biased toward
|
|
|
|
* the instructions of the x86_64 host. For MO_8, the whole byte
|
|
|
|
* is significant so we must use an unsigned compare; otherwise we
|
|
|
|
* have already masked to a byte and so a signed compare works.
|
|
|
|
* Other tcg hosts have a full set of comparisons and do not care.
|
|
|
|
*/
|
|
|
|
if (vece == MO_8) {
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_GEU, vece, lsh, lsh, max);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_GEU, vece, rsh, rsh, max);
|
|
|
|
tcg_gen_andc_vec(vece, lval, lval, lsh);
|
|
|
|
tcg_gen_andc_vec(vece, rval, rval, rsh);
|
|
|
|
} else {
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_LT, vece, lsh, lsh, max);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_LT, vece, rsh, rsh, max);
|
|
|
|
tcg_gen_and_vec(vece, lval, lval, lsh);
|
|
|
|
tcg_gen_and_vec(vece, rval, rval, rsh);
|
|
|
|
}
|
|
|
|
tcg_gen_or_vec(vece, dst, lval, rval);
|
|
|
|
|
|
|
|
tcg_temp_free_vec(max);
|
|
|
|
tcg_temp_free_vec(lval);
|
|
|
|
tcg_temp_free_vec(rval);
|
|
|
|
tcg_temp_free_vec(lsh);
|
|
|
|
tcg_temp_free_vec(rsh);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:38 +08:00
|
|
|
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_neg_vec, INDEX_op_shlv_vec,
|
|
|
|
INDEX_op_shrv_vec, INDEX_op_cmp_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_ushl_vec,
|
|
|
|
.fno = gen_helper_gvec_ushl_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_ushl_vec,
|
|
|
|
.fno = gen_helper_gvec_ushl_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_ushl_i32,
|
|
|
|
.fniv = gen_ushl_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_ushl_i64,
|
|
|
|
.fniv = gen_ushl_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2020-02-17 05:42:29 +08:00
|
|
|
|
|
|
|
void gen_sshl_i32(TCGv_i32 dst, TCGv_i32 src, TCGv_i32 shift)
|
|
|
|
{
|
|
|
|
TCGv_i32 lval = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 rval = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 lsh = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 rsh = tcg_temp_new_i32();
|
|
|
|
TCGv_i32 zero = tcg_const_i32(0);
|
|
|
|
TCGv_i32 max = tcg_const_i32(31);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_ext8s_i32(lsh, shift);
|
|
|
|
tcg_gen_neg_i32(rsh, lsh);
|
|
|
|
tcg_gen_shl_i32(lval, src, lsh);
|
|
|
|
tcg_gen_umin_i32(rsh, rsh, max);
|
|
|
|
tcg_gen_sar_i32(rval, src, rsh);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LEU, lval, lsh, max, lval, zero);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LT, dst, lsh, zero, rval, lval);
|
|
|
|
|
|
|
|
tcg_temp_free_i32(lval);
|
|
|
|
tcg_temp_free_i32(rval);
|
|
|
|
tcg_temp_free_i32(lsh);
|
|
|
|
tcg_temp_free_i32(rsh);
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
tcg_temp_free_i32(max);
|
|
|
|
}
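/*
 * Worked example of the clamping above (shift bytes chosen
 * arbitrarily): for a shift byte of 0xd8 (-40), rsh == 40 is clamped
 * by umin to 31, so the arithmetic shift yields pure sign bits (0 or
 * -1), and the final movcond selects that right-shift result because
 * lsh is negative.  For a shift byte of 40, lsh fails the LEU 31 test,
 * the left-shift result is replaced by zero, and zero is selected.
 */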
|
|
|
|
|
|
|
|
void gen_sshl_i64(TCGv_i64 dst, TCGv_i64 src, TCGv_i64 shift)
|
|
|
|
{
|
|
|
|
TCGv_i64 lval = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 rval = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 lsh = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 rsh = tcg_temp_new_i64();
|
|
|
|
TCGv_i64 zero = tcg_const_i64(0);
|
|
|
|
TCGv_i64 max = tcg_const_i64(63);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_ext8s_i64(lsh, shift);
|
|
|
|
tcg_gen_neg_i64(rsh, lsh);
|
|
|
|
tcg_gen_shl_i64(lval, src, lsh);
|
|
|
|
tcg_gen_umin_i64(rsh, rsh, max);
|
|
|
|
tcg_gen_sar_i64(rval, src, rsh);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LEU, lval, lsh, max, lval, zero);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LT, dst, lsh, zero, rval, lval);
|
|
|
|
|
|
|
|
tcg_temp_free_i64(lval);
|
|
|
|
tcg_temp_free_i64(rval);
|
|
|
|
tcg_temp_free_i64(lsh);
|
|
|
|
tcg_temp_free_i64(rsh);
|
|
|
|
tcg_temp_free_i64(zero);
|
|
|
|
tcg_temp_free_i64(max);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_sshl_vec(unsigned vece, TCGv_vec dst,
|
|
|
|
TCGv_vec src, TCGv_vec shift)
|
|
|
|
{
|
|
|
|
TCGv_vec lval = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec rval = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec lsh = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec rsh = tcg_temp_new_vec_matching(dst);
|
|
|
|
TCGv_vec tmp = tcg_temp_new_vec_matching(dst);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Rely on the TCG guarantee that out of range shifts produce
|
|
|
|
* unspecified results, not undefined behaviour (i.e. no trap).
|
|
|
|
* Discard out-of-range results after the fact.
|
|
|
|
*/
|
|
|
|
tcg_gen_neg_vec(vece, rsh, shift);
|
|
|
|
if (vece == MO_8) {
|
|
|
|
tcg_gen_mov_vec(lsh, shift);
|
|
|
|
} else {
|
|
|
|
tcg_gen_dupi_vec(vece, tmp, 0xff);
|
|
|
|
tcg_gen_and_vec(vece, lsh, shift, tmp);
|
|
|
|
tcg_gen_and_vec(vece, rsh, rsh, tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Bound rsh so out of bound right shift gets -1. */
|
|
|
|
tcg_gen_dupi_vec(vece, tmp, (8 << vece) - 1);
|
|
|
|
tcg_gen_umin_vec(vece, rsh, rsh, tmp);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_GT, vece, tmp, lsh, tmp);
|
|
|
|
|
|
|
|
tcg_gen_shlv_vec(vece, lval, src, lsh);
|
|
|
|
tcg_gen_sarv_vec(vece, rval, src, rsh);
|
|
|
|
|
|
|
|
/* Select in-bound left shift. */
|
|
|
|
tcg_gen_andc_vec(vece, lval, lval, tmp);
|
|
|
|
|
|
|
|
/* Select between left and right shift. */
|
|
|
|
if (vece == MO_8) {
|
|
|
|
tcg_gen_dupi_vec(vece, tmp, 0);
|
|
|
|
tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, rval, lval);
|
|
|
|
} else {
|
|
|
|
tcg_gen_dupi_vec(vece, tmp, 0x80);
|
|
|
|
tcg_gen_cmpsel_vec(TCG_COND_LT, vece, dst, lsh, tmp, lval, rval);
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_vec(lval);
|
|
|
|
tcg_temp_free_vec(rval);
|
|
|
|
tcg_temp_free_vec(lsh);
|
|
|
|
tcg_temp_free_vec(rsh);
|
|
|
|
tcg_temp_free_vec(tmp);
|
|
|
|
}
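/*
 * Note on the final selection above (a reading of the code, not taken
 * from the original comments): for vece != MO_8 the shift amount has
 * already been masked to the low byte, so every lane value is in
 * [0, 255] and the signed compare against 0x80 is true exactly when
 * the original shift byte was non-negative, selecting the left-shift
 * result.  For MO_8 the lane is the shift byte itself, so a signed
 * compare against 0 distinguishes the two cases directly, with the
 * cmpsel operands swapped accordingly.
 */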
|
|
|
|
|
2020-05-14 00:32:38 +08:00
|
|
|
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_neg_vec, INDEX_op_umin_vec, INDEX_op_shlv_vec,
|
|
|
|
INDEX_op_sarv_vec, INDEX_op_cmp_vec, INDEX_op_cmpsel_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_sshl_vec,
|
|
|
|
.fno = gen_helper_gvec_sshl_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_sshl_vec,
|
|
|
|
.fno = gen_helper_gvec_sshl_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_sshl_i32,
|
|
|
|
.fniv = gen_sshl_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_sshl_i64,
|
|
|
|
.fniv = gen_sshl_vec,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2020-02-17 05:42:29 +08:00
|
|
|
|
2019-02-15 17:56:41 +08:00
|
|
|
static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
|
|
|
|
TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec x = tcg_temp_new_vec_matching(t);
|
|
|
|
tcg_gen_add_vec(vece, x, a, b);
|
|
|
|
tcg_gen_usadd_vec(vece, t, a, b);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
|
|
|
|
tcg_gen_or_vec(vece, sat, sat, x);
|
|
|
|
tcg_temp_free_vec(x);
|
|
|
|
}
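/*
 * Worked example of the saturation detection above (lane values chosen
 * arbitrarily): for MO_8 with a == 200 and b == 100, the wrapping add
 * gives 44 while the saturating add gives 255; the two differ, so the
 * compare writes all-ones into that lane of x and the OR makes the
 * sticky QC flag non-zero.  When no lane saturates, x is zero and sat
 * is left unchanged.
 */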
|
|
|
|
|
2020-05-14 00:32:39 +08:00
|
|
|
void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen4 ops[4] = {
|
|
|
|
{ .fniv = gen_uqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_uqadd_b,
|
|
|
|
.write_aofs = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_uqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_uqadd_h,
|
|
|
|
.write_aofs = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fniv = gen_uqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_uqadd_s,
|
|
|
|
.write_aofs = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fniv = gen_uqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_uqadd_d,
|
|
|
|
.write_aofs = true,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
|
|
|
|
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2019-02-15 17:56:41 +08:00
|
|
|
|
|
|
|
static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
|
|
|
|
TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec x = tcg_temp_new_vec_matching(t);
|
|
|
|
tcg_gen_add_vec(vece, x, a, b);
|
|
|
|
tcg_gen_ssadd_vec(vece, t, a, b);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
|
|
|
|
tcg_gen_or_vec(vece, sat, sat, x);
|
|
|
|
tcg_temp_free_vec(x);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:39 +08:00
|
|
|
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen4 ops[4] = {
|
|
|
|
{ .fniv = gen_sqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_sqadd_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_sqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_sqadd_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fniv = gen_sqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_sqadd_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fniv = gen_sqadd_vec,
|
|
|
|
.fno = gen_helper_gvec_sqadd_d,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
|
|
|
|
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2019-02-15 17:56:41 +08:00
|
|
|
|
|
|
|
static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
|
|
|
|
TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec x = tcg_temp_new_vec_matching(t);
|
|
|
|
tcg_gen_sub_vec(vece, x, a, b);
|
|
|
|
tcg_gen_ussub_vec(vece, t, a, b);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
|
|
|
|
tcg_gen_or_vec(vece, sat, sat, x);
|
|
|
|
tcg_temp_free_vec(x);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:39 +08:00
|
|
|
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen4 ops[4] = {
|
|
|
|
{ .fniv = gen_uqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_uqsub_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_uqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_uqsub_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fniv = gen_uqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_uqsub_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fniv = gen_uqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_uqsub_d,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
|
|
|
|
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2019-02-15 17:56:41 +08:00
|
|
|
|
|
|
|
static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
|
|
|
|
TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec x = tcg_temp_new_vec_matching(t);
|
|
|
|
tcg_gen_sub_vec(vece, x, a, b);
|
|
|
|
tcg_gen_sssub_vec(vece, t, a, b);
|
|
|
|
tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
|
|
|
|
tcg_gen_or_vec(vece, sat, sat, x);
|
|
|
|
tcg_temp_free_vec(x);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:39 +08:00
|
|
|
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen4 ops[4] = {
|
|
|
|
{ .fniv = gen_sqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_sqsub_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_sqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_sqsub_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fniv = gen_sqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_sqsub_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fniv = gen_sqsub_vec,
|
|
|
|
.fno = gen_helper_gvec_sqsub_d,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.write_aofs = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
|
|
|
|
rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
2019-02-15 17:56:41 +08:00
|
|
|
|
2020-05-14 00:32:44 +08:00
|
|
|
static void gen_sabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
tcg_gen_sub_i32(t, a, b);
|
|
|
|
tcg_gen_sub_i32(d, b, a);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LT, d, a, b, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_sabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_sub_i64(t, a, b);
|
|
|
|
tcg_gen_sub_i64(d, b, a);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LT, d, a, b, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_sabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
tcg_gen_smin_vec(vece, t, a, b);
|
|
|
|
tcg_gen_smax_vec(vece, d, a, b);
|
|
|
|
tcg_gen_sub_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
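/*
 * Worked example of the absolute-difference trick above (lane values
 * chosen arbitrarily): smax(a, b) - smin(a, b) == |a - b| for signed
 * lanes, e.g. a == -3 and b == 5 gives 5 - (-3) == 8.  The scalar
 * versions compute both a - b and b - a and pick the non-negative one
 * with a movcond, giving the same result without needing min/max.
 */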
|
|
|
|
|
|
|
|
void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_sub_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_sabd_vec,
|
|
|
|
.fno = gen_helper_gvec_sabd_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_sabd_vec,
|
|
|
|
.fno = gen_helper_gvec_sabd_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_sabd_i32,
|
|
|
|
.fniv = gen_sabd_vec,
|
|
|
|
.fno = gen_helper_gvec_sabd_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_sabd_i64,
|
|
|
|
.fniv = gen_sabd_vec,
|
|
|
|
.fno = gen_helper_gvec_sabd_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uabd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
tcg_gen_sub_i32(t, a, b);
|
|
|
|
tcg_gen_sub_i32(d, b, a);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uabd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
tcg_gen_sub_i64(t, a, b);
|
|
|
|
tcg_gen_sub_i64(d, b, a);
|
|
|
|
tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uabd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
|
|
|
|
tcg_gen_umin_vec(vece, t, a, b);
|
|
|
|
tcg_gen_umax_vec(vece, d, a, b);
|
|
|
|
tcg_gen_sub_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_sub_vec, INDEX_op_umin_vec, INDEX_op_umax_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_uabd_vec,
|
|
|
|
.fno = gen_helper_gvec_uabd_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_uabd_vec,
|
|
|
|
.fno = gen_helper_gvec_uabd_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_uabd_i32,
|
|
|
|
.fniv = gen_uabd_vec,
|
|
|
|
.fno = gen_helper_gvec_uabd_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_uabd_i64,
|
|
|
|
.fniv = gen_uabd_vec,
|
|
|
|
.fno = gen_helper_gvec_uabd_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
|
|
|
|
2020-05-14 00:32:45 +08:00
|
|
|
static void gen_saba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
gen_sabd_i32(t, a, b);
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_saba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
gen_sabd_i64(t, a, b);
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_saba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
gen_sabd_vec(vece, t, a, b);
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_sub_vec, INDEX_op_add_vec,
|
|
|
|
INDEX_op_smin_vec, INDEX_op_smax_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_saba_vec,
|
|
|
|
.fno = gen_helper_gvec_saba_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_saba_vec,
|
|
|
|
.fno = gen_helper_gvec_saba_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_saba_i32,
|
|
|
|
.fniv = gen_saba_vec,
|
|
|
|
.fno = gen_helper_gvec_saba_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_saba_i64,
|
|
|
|
.fniv = gen_saba_vec,
|
|
|
|
.fno = gen_helper_gvec_saba_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uaba_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
TCGv_i32 t = tcg_temp_new_i32();
|
|
|
|
gen_uabd_i32(t, a, b);
|
|
|
|
tcg_gen_add_i32(d, d, t);
|
|
|
|
tcg_temp_free_i32(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uaba_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
|
|
|
|
{
|
|
|
|
TCGv_i64 t = tcg_temp_new_i64();
|
|
|
|
gen_uabd_i64(t, a, b);
|
|
|
|
tcg_gen_add_i64(d, d, t);
|
|
|
|
tcg_temp_free_i64(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_uaba_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
|
|
|
|
{
|
|
|
|
TCGv_vec t = tcg_temp_new_vec_matching(d);
|
|
|
|
gen_uabd_vec(vece, t, a, b);
|
|
|
|
tcg_gen_add_vec(vece, d, d, t);
|
|
|
|
tcg_temp_free_vec(t);
|
|
|
|
}
|
|
|
|
|
|
|
|
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
|
|
|
|
uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
|
|
|
|
{
|
|
|
|
static const TCGOpcode vecop_list[] = {
|
|
|
|
INDEX_op_sub_vec, INDEX_op_add_vec,
|
|
|
|
INDEX_op_umin_vec, INDEX_op_umax_vec, 0
|
|
|
|
};
|
|
|
|
static const GVecGen3 ops[4] = {
|
|
|
|
{ .fniv = gen_uaba_vec,
|
|
|
|
.fno = gen_helper_gvec_uaba_b,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_8 },
|
|
|
|
{ .fniv = gen_uaba_vec,
|
|
|
|
.fno = gen_helper_gvec_uaba_h,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_16 },
|
|
|
|
{ .fni4 = gen_uaba_i32,
|
|
|
|
.fniv = gen_uaba_vec,
|
|
|
|
.fno = gen_helper_gvec_uaba_s,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_32 },
|
|
|
|
{ .fni8 = gen_uaba_i64,
|
|
|
|
.fniv = gen_uaba_vec,
|
|
|
|
.fno = gen_helper_gvec_uaba_d,
|
|
|
|
.prefer_i64 = TCG_TARGET_REG_BITS == 64,
|
|
|
|
.opt_opc = vecop_list,
|
|
|
|
.load_dest = true,
|
|
|
|
.vece = MO_64 },
|
|
|
|
};
|
|
|
|
tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
|
|
|
|
}
|
|
|
|
|
2020-08-03 19:18:44 +08:00
|
|
|
static void do_coproc_insn(DisasContext *s, int cpnum, int is64,
|
|
|
|
int opc1, int crn, int crm, int opc2,
|
|
|
|
bool isread, int rt, int rt2)
|
2007-11-11 08:04:49 +08:00
|
|
|
{
|
2012-06-20 19:57:06 +08:00
|
|
|
const ARMCPRegInfo *ri;
|
2007-11-11 08:04:49 +08:00
|
|
|
|
2014-01-05 06:15:44 +08:00
|
|
|
ri = get_arm_cp_reginfo(s->cp_regs,
|
2014-12-11 20:07:49 +08:00
|
|
|
ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
|
2012-06-20 19:57:06 +08:00
|
|
|
if (ri) {
|
2019-10-23 23:00:50 +08:00
|
|
|
bool need_exit_tb;
|
|
|
|
|
2012-06-20 19:57:06 +08:00
|
|
|
/* Check access permissions */
|
2014-10-24 19:19:14 +08:00
|
|
|
if (!cp_access_ok(s->current_el, ri, isread)) {
|
2020-08-03 19:18:44 +08:00
|
|
|
unallocated_encoding(s);
|
|
|
|
return;
|
2012-06-20 19:57:06 +08:00
|
|
|
}
|
|
|
|
|
2019-12-01 20:20:17 +08:00
|
|
|
if (s->hstr_active || ri->accessfn ||
|
2014-10-29 03:24:01 +08:00
|
|
|
(arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
|
2014-02-20 18:35:52 +08:00
|
|
|
/* Emit code to perform further access permissions checks at
|
|
|
|
* runtime; this may result in an exception.
|
2014-09-30 01:48:48 +08:00
|
|
|
* Note that on XScale all cp0..c13 registers do an access check
|
|
|
|
* call in order to handle c15_cpar.
|
2014-02-20 18:35:52 +08:00
|
|
|
*/
|
|
|
|
TCGv_ptr tmpptr;
|
2016-02-11 19:17:31 +08:00
|
|
|
TCGv_i32 tcg_syn, tcg_isread;
|
2014-04-16 02:18:38 +08:00
|
|
|
uint32_t syndrome;
|
|
|
|
|
|
|
|
/* Note that since we are an implementation which takes an
|
|
|
|
* exception on a trapped conditional instruction only if the
|
|
|
|
* instruction passes its condition code check, we can take
|
|
|
|
* advantage of the clause in the ARM ARM that allows us to set
|
|
|
|
* the COND field in the syndrome to 0xE in all cases.
|
|
|
|
* We could fish the actual condition out of the insn (ARM)
|
|
|
|
* or the condexec bits (Thumb) but it isn't necessary.
|
|
|
|
*/
|
|
|
|
switch (cpnum) {
|
|
|
|
case 14:
|
|
|
|
if (is64) {
|
|
|
|
syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
|
2016-02-11 19:17:31 +08:00
|
|
|
isread, false);
|
2014-04-16 02:18:38 +08:00
|
|
|
} else {
|
|
|
|
syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
|
2016-02-11 19:17:31 +08:00
|
|
|
rt, isread, false);
|
2014-04-16 02:18:38 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
case 15:
|
|
|
|
if (is64) {
|
|
|
|
syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
|
2016-02-11 19:17:31 +08:00
|
|
|
isread, false);
|
2014-04-16 02:18:38 +08:00
|
|
|
} else {
|
|
|
|
syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
|
2016-02-11 19:17:31 +08:00
|
|
|
rt, isread, false);
|
2014-04-16 02:18:38 +08:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
/* ARMv8 defines that only coprocessors 14 and 15 exist,
|
|
|
|
* so this can only happen if this is an ARMv7 or earlier CPU,
|
|
|
|
* in which case the syndrome information won't actually be
|
|
|
|
* guest visible.
|
|
|
|
*/
|
2014-10-29 03:24:01 +08:00
|
|
|
assert(!arm_dc_feature(s, ARM_FEATURE_V8));
|
2014-04-16 02:18:38 +08:00
|
|
|
syndrome = syn_uncategorized();
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
2015-11-17 21:38:46 +08:00
|
|
|
gen_set_condexec(s);
|
2019-08-15 16:46:43 +08:00
|
|
|
gen_set_pc_im(s, s->pc_curr);
|
2014-02-20 18:35:52 +08:00
|
|
|
tmpptr = tcg_const_ptr(ri);
|
2014-04-16 02:18:38 +08:00
|
|
|
tcg_syn = tcg_const_i32(syndrome);
|
2016-02-11 19:17:31 +08:00
|
|
|
tcg_isread = tcg_const_i32(isread);
|
|
|
|
gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn,
|
|
|
|
tcg_isread);
|
2014-02-20 18:35:52 +08:00
|
|
|
tcg_temp_free_ptr(tmpptr);
|
2014-04-16 02:18:38 +08:00
|
|
|
tcg_temp_free_i32(tcg_syn);
|
2016-02-11 19:17:31 +08:00
|
|
|
tcg_temp_free_i32(tcg_isread);
|
2019-08-16 20:58:01 +08:00
|
|
|
} else if (ri->type & ARM_CP_RAISES_EXC) {
|
|
|
|
/*
|
|
|
|
* The readfn or writefn might raise an exception;
|
|
|
|
* synchronize the CPU state in case it does.
|
|
|
|
*/
|
|
|
|
gen_set_condexec(s);
|
|
|
|
gen_set_pc_im(s, s->pc_curr);
|
2014-02-20 18:35:52 +08:00
|
|
|
}
|
|
|
|
|
2012-06-20 19:57:06 +08:00
|
|
|
/* Handle special cases first */
|
|
|
|
switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
|
|
|
|
case ARM_CP_NOP:
|
            return;
        case ARM_CP_WFI:
            if (isread) {
                unallocated_encoding(s);
                return;
            }
            gen_set_pc_im(s, s->base.pc_next);
            s->base.is_jmp = DISAS_WFI;
            return;
        default:
            break;
        }

        if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tmp = tcg_temp_new_i32();
                tcg_gen_extrl_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tmp = tcg_temp_new_i32();
                tcg_gen_extrh_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        /* I/O operations must end the TB here (whether read or write) */
        need_exit_tb = ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) &&
                        (ri->type & ARM_CP_IO));

        if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /*
             * A write to any coprocessor register that ends a TB
             * must rebuild the hflags for the next TB.
             */
            TCGv_i32 tcg_el = tcg_const_i32(s->current_el);
            if (arm_dc_feature(s, ARM_FEATURE_M)) {
                gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
            } else {
                if (ri->type & ARM_CP_NEWEL) {
                    gen_helper_rebuild_hflags_a32_newel(cpu_env);
                } else {
                    gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
                }
            }
            tcg_temp_free_i32(tcg_el);
            /*
             * We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            need_exit_tb = true;
        }
        if (need_exit_tb) {
            gen_lookup_tb(s);
        }

        return;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "64 bit system register cp:%d opc1: %d crm:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                      "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                      "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    unallocated_encoding(s);
    return;
}

/* Decode XScale DSP or iWMMXt insn (in the copro space, cp=0 or 1) */
static void disas_xscale_insn(DisasContext *s, uint32_t insn)
{
    int cpnum = (insn >> 8) & 0xf;

    if (extract32(s->c15_cpar, cpnum, 1) == 0) {
        unallocated_encoding(s);
    } else if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
        if (disas_iwmmxt_insn(s, insn)) {
            unallocated_encoding(s);
        }
    } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
        if (disas_dsp_insn(s, insn)) {
            unallocated_encoding(s);
        }
    }
}
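
/*
 * Illustrative note (not part of the original comments): s->c15_cpar caches
 * the XScale Coprocessor Access Register value, where bit N gates guest
 * access to coprocessor N; a clear bit for cp0/cp1 therefore makes the
 * encodings handled above UNDEF.
 */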

/* Store a 64-bit value to a register pair.  Clobbers val.  */
static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
{
    TCGv_i32 tmp;
    tmp = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(tmp, val);
    store_reg(s, rlow, tmp);
    tmp = tcg_temp_new_i32();
    tcg_gen_extrh_i64_i32(tmp, val);
    store_reg(s, rhigh, tmp);
}

/* load and add a 64-bit value from a register pair. */
static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
{
    TCGv_i64 tmp;
    TCGv_i32 tmpl;
    TCGv_i32 tmph;

    /* Load 64-bit value rd:rn.  */
    tmpl = load_reg(s, rlow);
    tmph = load_reg(s, rhigh);
    tmp = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
    tcg_temp_free_i32(tmpl);
    tcg_temp_free_i32(tmph);
    tcg_gen_add_i64(val, val, tmp);
    tcg_temp_free_i64(tmp);
}

/* Set N and Z flags from hi|lo.  */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
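
/*
 * Illustrative note: gen_logicq_cc relies on QEMU's AArch32 flag
 * representation, where bit 31 of cpu_NF is the N flag and cpu_ZF is zero
 * exactly when Z is set; OR-ing the two halves therefore gives Z for the
 * full 64-bit result.
 */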

/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed.  This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.  The compare vs the remembered value is done during
   the cmpxchg operation, but we must compare the addresses manually. */
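/* Note: emulating LL/SC with a cmpxchg is not strictly correct, since it
   can suffer from the ABA problem (a store of the original value by another
   CPU is not detected); in practice portable code assumes only cmpxchg
   semantics, so this is a viable emulation strategy. */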

static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    MemOp opc = size | MO_ALIGN | s->be_data;

    s->is_ldex = true;

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i64 t64 = tcg_temp_new_i64();

        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. That means we don't want to do a
         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
         * for an architecturally 64-bit access, but instead do a
         * 64-bit access using MO_BE if appropriate and then split
         * the two halves.
         * This only makes a difference for BE32 user-mode, where
         * frob64() must not flip the two halves of the 64-bit data
         * but this code must treat BE32 user-mode like BE32 system.
         */
        TCGv taddr = gen_aa32_addr(s, addr, opc);

        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
        tcg_temp_free(taddr);
        tcg_gen_mov_i64(cpu_exclusive_val, t64);
        if (s->be_data == MO_BE) {
            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
        } else {
            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
        }
        tcg_temp_free_i64(t64);

        store_reg(s, rt2, tmp2);
    } else {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), opc);
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
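
/*
 * Illustrative note: the function above leaves the zero-extended address in
 * cpu_exclusive_addr and the loaded data in cpu_exclusive_val;
 * gen_store_exclusive() below checks both before performing the store.
 */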

static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
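
/*
 * Illustrative note: cpu_exclusive_addr is only ever set from a
 * zero-extended 32-bit address, so -1 can never match a real address and
 * serves as the "exclusive monitor clear" marker.
 */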

static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 t0, t1, t2;
    TCGv_i64 extaddr;
    TCGv taddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;
    MemOp opc = size | MO_ALIGN | s->be_data;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    taddr = gen_aa32_addr(s, addr, opc);
    t0 = tcg_temp_new_i32();
    t1 = load_reg(s, rt);
    if (size == 3) {
        TCGv_i64 o64 = tcg_temp_new_i64();
        TCGv_i64 n64 = tcg_temp_new_i64();

        t2 = load_reg(s, rt2);
        /* For AArch32, architecturally the 32-bit word at the lowest
         * address is always Rt and the one at addr+4 is Rt2, even if
         * the CPU is big-endian. Since we're going to treat this as a
         * single 64-bit BE store, we need to put the two halves in the
         * opposite order for BE to LE, so that they end up in the right
         * places.
         * We don't want gen_aa32_frob64() because that does the wrong
         * thing for BE32 usermode.
         */
        if (s->be_data == MO_BE) {
            tcg_gen_concat_i32_i64(n64, t2, t1);
        } else {
            tcg_gen_concat_i32_i64(n64, t1, t2);
        }
        tcg_temp_free_i32(t2);

        tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                   get_mem_index(s), opc);
        tcg_temp_free_i64(n64);

        tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
        tcg_gen_extrl_i64_i32(t0, o64);

        tcg_temp_free_i64(o64);
    } else {
        t2 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t2, cpu_exclusive_val);
        tcg_gen_atomic_cmpxchg_i32(t0, taddr, t2, t1, get_mem_index(s), opc);
        tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t2);
        tcg_temp_free_i32(t2);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free(taddr);
    tcg_gen_mov_i32(cpu_R[rd], t0);
    tcg_temp_free_i32(t0);
    tcg_gen_br(done_label);

    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
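
/*
 * Illustrative note: the atomic cmpxchg above returns the value that was
 * found in memory; comparing it against cpu_exclusive_val with TCG_COND_NE
 * yields 0 on success and 1 on failure, which is the STREX/STREXD result
 * convention written to Rd.
 */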

/* gen_srs:
 * @env: CPUARMState
 * @s: DisasContext
 * @mode: mode field from insn (which stack to store to)
 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
 * @writeback: true if writeback bit set
 *
 * Generate code for the SRS (Store Return State) insn.
 */
static void gen_srs(DisasContext *s,
                    uint32_t mode, uint32_t amode, bool writeback)
{
    int32_t offset;
    TCGv_i32 addr, tmp;
    bool undef = false;

    /* SRS is:
     * - trapped to EL3 if EL3 is AArch64 and we are at Secure EL1
     *   and specified mode is monitor mode
     * - UNDEFINED in Hyp mode
     * - UNPREDICTABLE in User or System mode
     * - UNPREDICTABLE if the specified mode is:
     * -- not implemented
     * -- not a valid mode number
     * -- a mode that's at a higher exception level
     * -- Monitor, if we are Non-secure
     * For the UNPREDICTABLE cases we choose to UNDEF.
     */
    if (s->current_el == 1 && !s->ns && mode == ARM_CPU_MODE_MON) {
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syn_uncategorized(), 3);
        return;
    }

    if (s->current_el == 0 || s->current_el == 2) {
        undef = true;
    }

    switch (mode) {
    case ARM_CPU_MODE_USR:
    case ARM_CPU_MODE_FIQ:
    case ARM_CPU_MODE_IRQ:
    case ARM_CPU_MODE_SVC:
    case ARM_CPU_MODE_ABT:
    case ARM_CPU_MODE_UND:
    case ARM_CPU_MODE_SYS:
        break;
    case ARM_CPU_MODE_HYP:
        if (s->current_el == 1 || !arm_dc_feature(s, ARM_FEATURE_EL2)) {
            undef = true;
        }
        break;
    case ARM_CPU_MODE_MON:
        /* No need to check specifically for "are we non-secure" because
         * we've already made EL0 UNDEF and handled the trap for S-EL1;
         * so if this isn't EL3 then we must be non-secure.
         */
        if (s->current_el != 3) {
            undef = true;
        }
        break;
    default:
        undef = true;
    }

    if (undef) {
        unallocated_encoding(s);
        return;
    }

    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(mode);
    /* get_r13_banked() will raise an exception if called from System mode */
    gen_set_condexec(s);
    gen_set_pc_im(s, s->pc_curr);
    gen_helper_get_r13_banked(addr, cpu_env, tmp);
    tcg_temp_free_i32(tmp);
    switch (amode) {
    case 0: /* DA */
        offset = -4;
        break;
    case 1: /* IA */
        offset = 0;
        break;
    case 2: /* DB */
        offset = -8;
        break;
    case 3: /* IB */
        offset = 4;
        break;
    default:
        abort();
    }
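    /*
     * Illustrative note: with the offsets chosen above, LR is stored at
     * SP-4/SP/SP-8/SP+4 for DA/IA/DB/IB respectively, and the SPSR at the
     * following word.
     */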
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
tmp = load_reg(s, 14);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
|
2013-05-23 20:00:00 +08:00
|
|
|
tcg_temp_free_i32(tmp);
|
2013-03-05 08:31:17 +08:00
|
|
|
tmp = load_cpu_field(spsr);
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
2016-03-04 19:30:20 +08:00
|
|
|
gen_aa32_st32(s, tmp, addr, get_mem_index(s));
|
2013-05-23 20:00:00 +08:00
|
|
|
tcg_temp_free_i32(tmp);
|
2013-03-05 08:31:17 +08:00
|
|
|
if (writeback) {
|
|
|
|
switch (amode) {
|
|
|
|
case 0:
|
|
|
|
offset = -8;
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
offset = 4;
|
|
|
|
break;
|
|
|
|
case 2:
|
|
|
|
offset = -4;
|
|
|
|
break;
|
|
|
|
case 3:
|
|
|
|
offset = 0;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
abort();
|
|
|
|
}
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
tmp = tcg_const_i32(mode);
|
|
|
|
gen_helper_set_r13_banked(cpu_env, tmp, addr);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(addr);
|
2020-06-26 11:31:03 +08:00
|
|
|
s->base.is_jmp = DISAS_UPDATE_EXIT;
|
2013-03-05 08:31:17 +08:00
|
|
|
}
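
/*
 * Worked example: SRSDB (amode == 2) with writeback stores LR at
 * [SP_<mode> - 8] and SPSR at [SP_<mode> - 4], then writes SP_<mode> - 8
 * back to the banked SP: the initial offset of -8, the +4 step between the
 * two stores and the final writeback offset of -4 land exactly there.
 */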

/* Skip this instruction if the ARM condition is false */
static void arm_skip_unless(DisasContext *s, uint32_t cond)
{
    arm_gen_condlabel(s);
    arm_gen_test_cc(cond ^ 1, s->condlabel);
}

/*
 * Constant expanders used by T16/T32 decode
 */

/* Return only the rotation part of T32ExpandImm. */
static int t32_expandimm_rot(DisasContext *s, int x)
{
    return x & 0xc00 ? extract32(x, 7, 5) : 0;
}

/* Return the unrotated immediate from T32ExpandImm. */
static int t32_expandimm_imm(DisasContext *s, int x)
{
    int imm = extract32(x, 0, 8);

    switch (extract32(x, 8, 4)) {
    case 0: /* XY */
        /* Nothing to do. */
        break;
    case 1: /* 00XY00XY */
        imm *= 0x00010001;
        break;
    case 2: /* XY00XY00 */
        imm *= 0x01000100;
        break;
    case 3: /* XYXYXYXY */
        imm *= 0x01010101;
        break;
    default:
        /* Rotated constant. */
        imm |= 0x80;
        break;
    }
    return imm;
}
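
/*
 * Worked example (illustrative values): for the 12-bit field
 * i:imm3:imm8 = 0x1AB, pattern case 1 yields the immediate 0x00AB00AB with
 * no rotation.  For 0x43C the "rotated constant" path applies: the
 * unrotated value is 0x3C | 0x80 = 0xBC and t32_expandimm_rot() returns
 * bits [11:7] = 8, so the final immediate is ror32(0xBC, 8) = 0xBC000000.
 */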

static int t32_branch24(DisasContext *s, int x)
{
    /* Convert J1:J2 at x[22:21] to I2:I1, which involves I=J^~S. */
    x ^= !(x < 0) * (3 << 21);
    /* Append the final zero. */
    return x << 1;
}
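
/*
 * For illustration (example values, assuming x is the sign-extended
 * S:J1:J2:imm10:imm11 field): a forward BL with S=0, J1=J2=1 and imm11=2
 * arrives here as x = (3 << 21) | 2.  Since x >= 0 the XOR clears bits
 * [22:21] (I1 = I2 = 0), and the final shift appends the trailing zero,
 * giving a branch offset of 4 bytes.
 */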

static int t16_setflags(DisasContext *s)
{
    return s->condexec_mask == 0;
}
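
/*
 * For example, the 16-bit three-low-register ADD encoding behaves as ADDS
 * outside an IT block and as a non-flag-setting ADD inside one;
 * condexec_mask is non-zero exactly while we are translating inside an IT
 * block, so this expander supplies the 's' flag accordingly.
 */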

static int t16_push_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (14 - 8);
}

static int t16_pop_list(DisasContext *s, int x)
{
    return (x & 0xff) | (x & 0x100) << (15 - 8);
}
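
/*
 * For example (illustrative values): PUSH {r0, r4, lr} encodes M=1 and
 * register_list=0x11, i.e. x = 0x111, which t16_push_list() turns into the
 * full list 0x4011 (bits 0, 4 and 14).  The same encoding bit names PC
 * instead of LR for POP, hence the (15 - 8) shift in t16_pop_list().
 */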

/*
 * Include the generated decoders.
 */

#include "decode-a32.c.inc"
#include "decode-a32-uncond.c.inc"
#include "decode-t32.c.inc"
#include "decode-t16.c.inc"

static bool valid_cp(DisasContext *s, int cp)
{
    /*
     * Return true if this coprocessor field indicates something
     * that's really a possible coprocessor.
     * For v7 and earlier, coprocessors 8..15 were reserved for Arm use,
     * and of those only cp14 and cp15 were used for registers.
     * cp10 and cp11 were used for VFP and Neon, whose decode is
     * dealt with elsewhere. With the advent of fp16, cp9 is also
     * now part of VFP.
     * For v8A and later, the encoding has been tightened so that
     * only cp14 and cp15 are valid, and other values aren't considered
     * to be in the coprocessor-instruction space at all. v8M still
     * permits coprocessors 0..7.
     * For XScale, we must not decode the XScale cp0, cp1 space as
     * a standard coprocessor insn, because we want to fall through to
     * the legacy disas_xscale_insn() decoder after decodetree is done.
     */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cp == 0 || cp == 1)) {
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        return cp >= 14;
    }
    return cp < 8 || cp >= 14;
}
|
|
|
|
|
|
|
|
static bool trans_MCR(DisasContext *s, arg_MCR *a)
|
|
|
|
{
|
|
|
|
if (!valid_cp(s, a->cp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
|
|
|
|
false, a->rt, 0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MRC(DisasContext *s, arg_MRC *a)
|
|
|
|
{
|
|
|
|
if (!valid_cp(s, a->cp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
do_coproc_insn(s, a->cp, false, a->opc1, a->crn, a->crm, a->opc2,
|
|
|
|
true, a->rt, 0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MCRR(DisasContext *s, arg_MCRR *a)
|
|
|
|
{
|
|
|
|
if (!valid_cp(s, a->cp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
|
|
|
|
false, a->rt, a->rt2);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MRRC(DisasContext *s, arg_MRRC *a)
|
|
|
|
{
|
|
|
|
if (!valid_cp(s, a->cp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
do_coproc_insn(s, a->cp, true, a->opc1, 0, a->crm, 0,
|
|
|
|
true, a->rt, a->rt2);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:53 +08:00
|
|
|
/* Helpers to swap operands for reverse-subtract. */
|
|
|
|
static void gen_rsb(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
tcg_gen_sub_i32(dst, b, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_rsb_CC(TCGv_i32 dst, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_sub_CC(dst, b, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_rsc(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_sub_carry(dest, b, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_rsc_CC(TCGv_i32 dest, TCGv_i32 a, TCGv_i32 b)
|
|
|
|
{
|
|
|
|
gen_sbc_CC(dest, b, a);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Helpers for the data processing routines.
|
|
|
|
*
|
|
|
|
* After the computation store the results back.
|
|
|
|
* This may be suppressed altogether (STREG_NONE), require a runtime
|
|
|
|
* check against the stack limits (STREG_SP_CHECK), or generate an
|
|
|
|
* exception return. Oh, or store into a register.
|
|
|
|
*
|
|
|
|
* Always return true, indicating success for a trans_* function.
|
|
|
|
*/
|
|
|
|
typedef enum {
|
|
|
|
STREG_NONE,
|
|
|
|
STREG_NORMAL,
|
|
|
|
STREG_SP_CHECK,
|
|
|
|
STREG_EXC_RET,
|
|
|
|
} StoreRegKind;
|
|
|
|
|
|
|
|
static bool store_reg_kind(DisasContext *s, int rd,
|
|
|
|
TCGv_i32 val, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
switch (kind) {
|
|
|
|
case STREG_NONE:
|
|
|
|
tcg_temp_free_i32(val);
|
|
|
|
return true;
|
|
|
|
case STREG_NORMAL:
|
|
|
|
/* See ALUWritePC: Interworking only from a32 mode. */
|
|
|
|
if (s->thumb) {
|
|
|
|
store_reg(s, rd, val);
|
|
|
|
} else {
|
|
|
|
store_reg_bx(s, rd, val);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
case STREG_SP_CHECK:
|
|
|
|
store_sp_checked(s, val);
|
|
|
|
return true;
|
|
|
|
case STREG_EXC_RET:
|
|
|
|
gen_exception_return(s, val);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
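
/*
 * For example, the classic A32 exception return "SUBS pc, lr, #4" reaches
 * this function with STREG_EXC_RET (via the SUB expansion further down),
 * so the result is handed to gen_exception_return() rather than written
 * back as an ordinary register.
 */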
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Data Processing (register)
|
|
|
|
*
|
|
|
|
* Operate, with set flags, one register source,
|
|
|
|
* one immediate shifted register source, and a destination.
|
|
|
|
*/
|
|
|
|
static bool op_s_rrr_shi(DisasContext *s, arg_s_rrr_shi *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp1, tmp2;
|
|
|
|
|
|
|
|
tmp2 = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_im(tmp2, a->shty, a->shim, logic_cc);
|
|
|
|
tmp1 = load_reg(s, a->rn);
|
|
|
|
|
|
|
|
gen(tmp1, tmp1, tmp2);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp1);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp1, kind);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_s_rxr_shi(DisasContext *s, arg_s_rrr_shi *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_im(tmp, a->shty, a->shim, logic_cc);
|
|
|
|
|
|
|
|
gen(tmp, tmp);
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp, kind);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:54 +08:00
|
|
|
/*
|
|
|
|
* Data-processing (register-shifted register)
|
|
|
|
*
|
|
|
|
* Operate, with set flags, one register source,
|
|
|
|
* one register shifted register source, and a destination.
|
|
|
|
*/
|
|
|
|
static bool op_s_rrr_shr(DisasContext *s, arg_s_rrr_shr *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp1, tmp2;
|
|
|
|
|
|
|
|
tmp1 = load_reg(s, a->rs);
|
|
|
|
tmp2 = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
|
|
|
|
tmp1 = load_reg(s, a->rn);
|
|
|
|
|
|
|
|
gen(tmp1, tmp1, tmp2);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp1);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp1, kind);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_s_rxr_shr(DisasContext *s, arg_s_rrr_shr *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp1, tmp2;
|
|
|
|
|
|
|
|
tmp1 = load_reg(s, a->rs);
|
|
|
|
tmp2 = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_reg(tmp2, a->shty, tmp1, logic_cc);
|
|
|
|
|
|
|
|
gen(tmp2, tmp2);
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp2);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp2, kind);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:55 +08:00
|
|
|
/*
|
|
|
|
* Data-processing (immediate)
|
|
|
|
*
|
|
|
|
* Operate, with set flags, one register source,
|
|
|
|
* one rotated immediate, and a destination.
|
|
|
|
*
|
|
|
|
* Note that logic_cc && a->rot setting CF based on the msb of the
|
|
|
|
* immediate is the reason why we must pass in the unrotated form
|
|
|
|
* of the immediate.
|
|
|
|
*/
|
|
|
|
static bool op_s_rri_rot(DisasContext *s, arg_s_rri_rot *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp1, tmp2;
|
|
|
|
uint32_t imm;
|
|
|
|
|
|
|
|
imm = ror32(a->imm, a->rot);
|
|
|
|
if (logic_cc && a->rot) {
|
|
|
|
tcg_gen_movi_i32(cpu_CF, imm >> 31);
|
|
|
|
}
|
|
|
|
tmp2 = tcg_const_i32(imm);
|
|
|
|
tmp1 = load_reg(s, a->rn);
|
|
|
|
|
|
|
|
gen(tmp1, tmp1, tmp2);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp1);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp1, kind);
|
|
|
|
}
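
/*
 * For example (illustrative values): with a->imm = 0xBC and a->rot = 8 the
 * operand becomes ror32(0xBC, 8) = 0xBC000000, and because the rotation is
 * non-zero a flag-setting logical op copies bit 31 of that result (here 1)
 * into CF -- which is why the immediate must arrive here unrotated.
 */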
|
|
|
|
|
|
|
|
static bool op_s_rxi_rot(DisasContext *s, arg_s_rri_rot *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32),
|
|
|
|
int logic_cc, StoreRegKind kind)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
uint32_t imm;
|
|
|
|
|
|
|
|
imm = ror32(a->imm, a->rot);
|
|
|
|
if (logic_cc && a->rot) {
|
|
|
|
tcg_gen_movi_i32(cpu_CF, imm >> 31);
|
|
|
|
}
|
|
|
|
tmp = tcg_const_i32(imm);
|
|
|
|
|
|
|
|
gen(tmp, tmp);
|
|
|
|
if (logic_cc) {
|
|
|
|
gen_logic_CC(tmp);
|
|
|
|
}
|
|
|
|
return store_reg_kind(s, a->rd, tmp, kind);
|
|
|
|
}

#define DO_ANY3(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rrr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rri(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rri_rot(s, a, OP, L, k); }

#define DO_ANY2(NAME, OP, L, K)                                         \
    static bool trans_##NAME##_rxri(DisasContext *s, arg_s_rrr_shi *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shi(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { StoreRegKind k = (K); return op_s_rxr_shr(s, a, OP, L, k); }      \
    static bool trans_##NAME##_rxi(DisasContext *s, arg_s_rri_rot *a)   \
    { StoreRegKind k = (K); return op_s_rxi_rot(s, a, OP, L, k); }

#define DO_CMP2(NAME, OP, L)                                            \
    static bool trans_##NAME##_xrri(DisasContext *s, arg_s_rrr_shi *a)  \
    { return op_s_rrr_shi(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xrrr(DisasContext *s, arg_s_rrr_shr *a)  \
    { return op_s_rrr_shr(s, a, OP, L, STREG_NONE); }                   \
    static bool trans_##NAME##_xri(DisasContext *s, arg_s_rri_rot *a)   \
    { return op_s_rri_rot(s, a, OP, L, STREG_NONE); }
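
/*
 * As an illustration of the expansion: DO_ANY3(AND, tcg_gen_and_i32, a->s,
 * STREG_NORMAL) below defines trans_AND_rrri(), trans_AND_rrrr() and
 * trans_AND_rri(), one for each of the shifted-register,
 * register-shifted-register and immediate forms that the generated
 * decoders dispatch to.
 */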
|
2019-09-05 03:29:53 +08:00
|
|
|
|
|
|
|
DO_ANY3(AND, tcg_gen_and_i32, a->s, STREG_NORMAL)
|
|
|
|
DO_ANY3(EOR, tcg_gen_xor_i32, a->s, STREG_NORMAL)
|
|
|
|
DO_ANY3(ORR, tcg_gen_or_i32, a->s, STREG_NORMAL)
|
|
|
|
DO_ANY3(BIC, tcg_gen_andc_i32, a->s, STREG_NORMAL)
|
|
|
|
|
|
|
|
DO_ANY3(RSB, a->s ? gen_rsb_CC : gen_rsb, false, STREG_NORMAL)
|
|
|
|
DO_ANY3(ADC, a->s ? gen_adc_CC : gen_add_carry, false, STREG_NORMAL)
|
|
|
|
DO_ANY3(SBC, a->s ? gen_sbc_CC : gen_sub_carry, false, STREG_NORMAL)
|
|
|
|
DO_ANY3(RSC, a->s ? gen_rsc_CC : gen_rsc, false, STREG_NORMAL)
|
|
|
|
|
|
|
|
DO_CMP2(TST, tcg_gen_and_i32, true)
|
|
|
|
DO_CMP2(TEQ, tcg_gen_xor_i32, true)
|
|
|
|
DO_CMP2(CMN, gen_add_CC, false)
|
|
|
|
DO_CMP2(CMP, gen_sub_CC, false)
|
|
|
|
|
|
|
|
DO_ANY3(ADD, a->s ? gen_add_CC : tcg_gen_add_i32, false,
|
|
|
|
a->rd == 13 && a->rn == 13 ? STREG_SP_CHECK : STREG_NORMAL)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Note for the computation of StoreRegKind we return out of the
|
|
|
|
* middle of the functions that are expanded by DO_ANY3, and that
|
|
|
|
* we modify a->s via that parameter before it is used by OP.
|
|
|
|
*/
|
|
|
|
DO_ANY3(SUB, a->s ? gen_sub_CC : tcg_gen_sub_i32, false,
|
|
|
|
({
|
|
|
|
StoreRegKind ret = STREG_NORMAL;
|
|
|
|
if (a->rd == 15 && a->s) {
|
|
|
|
/*
|
|
|
|
* See ALUExceptionReturn:
|
|
|
|
* In User mode, UNPREDICTABLE; we choose UNDEF.
|
|
|
|
* In Hyp mode, UNDEFINED.
|
|
|
|
*/
|
|
|
|
if (IS_USER(s) || s->current_el == 2) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
/* There is no writeback of nzcv to PSTATE. */
|
|
|
|
a->s = 0;
|
|
|
|
ret = STREG_EXC_RET;
|
|
|
|
} else if (a->rd == 13 && a->rn == 13) {
|
|
|
|
ret = STREG_SP_CHECK;
|
|
|
|
}
|
|
|
|
ret;
|
|
|
|
}))
|
|
|
|
|
|
|
|
DO_ANY2(MOV, tcg_gen_mov_i32, a->s,
|
|
|
|
({
|
|
|
|
StoreRegKind ret = STREG_NORMAL;
|
|
|
|
if (a->rd == 15 && a->s) {
|
|
|
|
/*
|
|
|
|
* See ALUExceptionReturn:
|
|
|
|
* In User mode, UNPREDICTABLE; we choose UNDEF.
|
|
|
|
* In Hyp mode, UNDEFINED.
|
|
|
|
*/
|
|
|
|
if (IS_USER(s) || s->current_el == 2) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
/* There is no writeback of nzcv to PSTATE. */
|
|
|
|
a->s = 0;
|
|
|
|
ret = STREG_EXC_RET;
|
|
|
|
} else if (a->rd == 13) {
|
|
|
|
ret = STREG_SP_CHECK;
|
|
|
|
}
|
|
|
|
ret;
|
|
|
|
}))
|
|
|
|
|
|
|
|
DO_ANY2(MVN, tcg_gen_not_i32, a->s, STREG_NORMAL)
|
|
|
|
|
|
|
|
/*
|
|
|
|
* ORN is only available with T32, so there is no register-shifted-register
|
|
|
|
* form of the insn. Using the DO_ANY3 macro would create an unused function.
|
|
|
|
*/
|
|
|
|
static bool trans_ORN_rrri(DisasContext *s, arg_s_rrr_shi *a)
|
|
|
|
{
|
|
|
|
return op_s_rrr_shi(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:55 +08:00
|
|
|
static bool trans_ORN_rri(DisasContext *s, arg_s_rri_rot *a)
|
|
|
|
{
|
|
|
|
return op_s_rri_rot(s, a, tcg_gen_orc_i32, a->s, STREG_NORMAL);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:53 +08:00
|
|
|
#undef DO_ANY3
|
|
|
|
#undef DO_ANY2
|
|
|
|
#undef DO_CMP2
|
|
|
|
|
2019-09-05 03:30:09 +08:00
|
|
|
static bool trans_ADR(DisasContext *s, arg_ri *a)
|
|
|
|
{
|
|
|
|
store_reg_bx(s, a->rd, add_reg_for_lit(s, 15, a->imm));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:17 +08:00
|
|
|
static bool trans_MOVW(DisasContext *s, arg_MOVW *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_const_i32(a->imm);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MOVT(DisasContext *s, arg_MOVW *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rd);
|
|
|
|
tcg_gen_ext16u_i32(tmp, tmp);
|
|
|
|
tcg_gen_ori_i32(tmp, tmp, a->imm << 16);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
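
/*
 * Usage note (example values): a 32-bit constant is typically built with a
 * MOVW/MOVT pair, e.g. "movw r0, #0x5678; movt r0, #0x1234" leaves
 * 0x12345678 in r0 -- MOVW zero-extends its immediate, while MOVT above
 * keeps the low halfword and replaces only bits [31:16].
 */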
|
|
|
|
|
2019-09-05 03:29:56 +08:00
|
|
|
/*
|
|
|
|
* Multiply and multiply accumulate
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_mla(DisasContext *s, arg_s_rrrr *a, bool add)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
tcg_gen_mul_i32(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
if (add) {
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
tcg_gen_add_i32(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
}
|
|
|
|
if (a->s) {
|
|
|
|
gen_logic_CC(t1);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MUL(DisasContext *s, arg_MUL *a)
|
|
|
|
{
|
|
|
|
return op_mla(s, a, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MLA(DisasContext *s, arg_MLA *a)
|
|
|
|
{
|
|
|
|
return op_mla(s, a, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MLS(DisasContext *s, arg_MLS *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
tcg_gen_mul_i32(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
tcg_gen_sub_i32(t1, t2, t1);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_mlal(DisasContext *s, arg_s_rrrr *a, bool uns, bool add)
|
|
|
|
{
|
|
|
|
TCGv_i32 t0, t1, t2, t3;
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rm);
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
if (uns) {
|
|
|
|
tcg_gen_mulu2_i32(t0, t1, t0, t1);
|
|
|
|
} else {
|
|
|
|
tcg_gen_muls2_i32(t0, t1, t0, t1);
|
|
|
|
}
|
|
|
|
if (add) {
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
t3 = load_reg(s, a->rd);
|
|
|
|
tcg_gen_add2_i32(t0, t1, t0, t1, t2, t3);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
tcg_temp_free_i32(t3);
|
|
|
|
}
|
|
|
|
if (a->s) {
|
|
|
|
gen_logicq_cc(t0, t1);
|
|
|
|
}
|
|
|
|
store_reg(s, a->ra, t0);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UMULL(DisasContext *s, arg_UMULL *a)
|
|
|
|
{
|
|
|
|
return op_mlal(s, a, true, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMULL(DisasContext *s, arg_SMULL *a)
|
|
|
|
{
|
|
|
|
return op_mlal(s, a, false, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UMLAL(DisasContext *s, arg_UMLAL *a)
|
|
|
|
{
|
|
|
|
return op_mlal(s, a, true, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLAL(DisasContext *s, arg_SMLAL *a)
|
|
|
|
{
|
|
|
|
return op_mlal(s, a, false, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UMAAL(DisasContext *s, arg_UMAAL *a)
|
|
|
|
{
TCGv_i32 t0, t1, t2, zero;
|
2019-09-05 03:29:56 +08:00
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rm);
|
|
|
|
t1 = load_reg(s, a->rn);
|
2019-09-05 03:29:57 +08:00
|
|
|
tcg_gen_mulu2_i32(t0, t1, t0, t1);
|
|
|
|
zero = tcg_const_i32(0);
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
t2 = load_reg(s, a->rd);
|
|
|
|
tcg_gen_add2_i32(t0, t1, t0, t1, t2, zero);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
store_reg(s, a->ra, t0);
|
|
|
|
store_reg(s, a->rd, t1);
|
2019-09-05 03:29:56 +08:00
|
|
|
return true;
|
|
|
|
}
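
/*
 * UMAAL never overflows 64 bits: the largest possible value is
 * (2^32 - 1)^2 + 2 * (2^32 - 1) = 2^64 - 1, which is why the two 32-bit
 * accumulators can be folded in with plain add2 operations above.
 */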
|
|
|
|
|
2019-09-05 03:29:58 +08:00
|
|
|
/*
|
|
|
|
* Saturating addition and subtraction
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_qaddsub(DisasContext *s, arg_rrr *a, bool add, bool doub)
|
|
|
|
{
|
|
|
|
TCGv_i32 t0, t1;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_5TE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rm);
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
if (doub) {
|
|
|
|
gen_helper_add_saturate(t1, cpu_env, t1, t1);
|
|
|
|
}
|
|
|
|
if (add) {
|
|
|
|
gen_helper_add_saturate(t0, cpu_env, t0, t1);
|
|
|
|
} else {
|
|
|
|
gen_helper_sub_saturate(t0, cpu_env, t0, t1);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
store_reg(s, a->rd, t0);
|
|
|
|
return true;
|
|
|
|
}
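
/*
 * For example, QDADD rd, rm, rn computes rd = sat(rm + sat(2 * rn)): the
 * "doub" path above saturates the doubling of rn before the final
 * saturating add, and the Q flag is set if either step saturates.
 */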
|
|
|
|
|
|
|
|
#define DO_QADDSUB(NAME, ADD, DOUB) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
|
|
|
|
{ \
|
|
|
|
return op_qaddsub(s, a, ADD, DOUB); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_QADDSUB(QADD, true, false)
|
|
|
|
DO_QADDSUB(QSUB, false, false)
|
|
|
|
DO_QADDSUB(QDADD, true, true)
|
|
|
|
DO_QADDSUB(QDSUB, false, true)
|
|
|
|
|
|
|
|
#undef DO_QADDSUB
|
|
|
|
|
2019-09-05 03:29:59 +08:00
|
|
|
/*
|
|
|
|
* Halfword multiply and multiply accumulate
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_smlaxxx(DisasContext *s, arg_rrrr *a,
|
|
|
|
int add_long, bool nt, bool mt)
|
|
|
|
{
|
2019-09-05 03:30:00 +08:00
|
|
|
TCGv_i32 t0, t1, tl, th;
|
2019-09-05 03:29:59 +08:00
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_5TE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rn);
|
|
|
|
t1 = load_reg(s, a->rm);
|
|
|
|
gen_mulxy(t0, t1, nt, mt);
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
|
|
|
|
switch (add_long) {
|
|
|
|
case 0:
|
|
|
|
store_reg(s, a->rd, t0);
|
|
|
|
break;
|
|
|
|
case 1:
|
|
|
|
t1 = load_reg(s, a->ra);
|
|
|
|
gen_helper_add_setq(t0, cpu_env, t0, t1);
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
store_reg(s, a->rd, t0);
|
|
|
|
break;
|
|
|
|
case 2:
|
2019-09-05 03:30:00 +08:00
|
|
|
tl = load_reg(s, a->ra);
|
|
|
|
th = load_reg(s, a->rd);
|
2019-10-22 23:50:35 +08:00
|
|
|
/* Sign-extend the 32-bit product to 64 bits. */
|
|
|
|
t1 = tcg_temp_new_i32();
|
|
|
|
tcg_gen_sari_i32(t1, t0, 31);
|
2019-09-05 03:30:00 +08:00
|
|
|
tcg_gen_add2_i32(tl, th, tl, th, t0, t1);
|
2019-09-05 03:29:59 +08:00
|
|
|
tcg_temp_free_i32(t0);
|
2019-09-05 03:30:00 +08:00
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
store_reg(s, a->ra, tl);
|
|
|
|
store_reg(s, a->rd, th);
|
2019-09-05 03:29:59 +08:00
|
|
|
break;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_SMLAX(NAME, add, nt, mt) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
|
|
|
|
{ \
|
|
|
|
return op_smlaxxx(s, a, add, nt, mt); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_SMLAX(SMULBB, 0, 0, 0)
|
|
|
|
DO_SMLAX(SMULBT, 0, 0, 1)
|
|
|
|
DO_SMLAX(SMULTB, 0, 1, 0)
|
|
|
|
DO_SMLAX(SMULTT, 0, 1, 1)
|
|
|
|
|
|
|
|
DO_SMLAX(SMLABB, 1, 0, 0)
|
|
|
|
DO_SMLAX(SMLABT, 1, 0, 1)
|
|
|
|
DO_SMLAX(SMLATB, 1, 1, 0)
|
|
|
|
DO_SMLAX(SMLATT, 1, 1, 1)
|
|
|
|
|
|
|
|
DO_SMLAX(SMLALBB, 2, 0, 0)
|
|
|
|
DO_SMLAX(SMLALBT, 2, 0, 1)
|
|
|
|
DO_SMLAX(SMLALTB, 2, 1, 0)
|
|
|
|
DO_SMLAX(SMLALTT, 2, 1, 1)
|
|
|
|
|
|
|
|
#undef DO_SMLAX
|
|
|
|
|
|
|
|
static bool op_smlawx(DisasContext *s, arg_rrrr *a, bool add, bool mt)
|
|
|
|
{
|
|
|
|
TCGv_i32 t0, t1;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_5TE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rn);
|
|
|
|
t1 = load_reg(s, a->rm);
|
2019-09-05 03:30:01 +08:00
|
|
|
/*
|
|
|
|
* Since the nominal result is product<47:16>, shift the 16-bit
|
|
|
|
* input up by 16 bits, so that the result is at product<63:32>.
|
|
|
|
*/
|
2019-09-05 03:29:59 +08:00
|
|
|
if (mt) {
|
2019-09-05 03:30:01 +08:00
|
|
|
tcg_gen_andi_i32(t1, t1, 0xffff0000);
|
2019-09-05 03:29:59 +08:00
|
|
|
} else {
|
2019-09-05 03:30:01 +08:00
|
|
|
tcg_gen_shli_i32(t1, t1, 16);
|
2019-09-05 03:29:59 +08:00
|
|
|
}
|
2019-09-05 03:30:01 +08:00
|
|
|
tcg_gen_muls2_i32(t0, t1, t0, t1);
|
|
|
|
tcg_temp_free_i32(t0);
|
2019-09-05 03:29:59 +08:00
|
|
|
if (add) {
|
|
|
|
t0 = load_reg(s, a->ra);
|
|
|
|
gen_helper_add_setq(t1, cpu_env, t1, t0);
|
|
|
|
tcg_temp_free_i32(t0);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
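
/*
 * Example (illustrative numbers): for rn = 0x00010000 and a bottom halfword
 * of 0x4000, the code multiplies 0x00010000 by 0x40000000; the high 32 bits
 * of that product are 0x00004000, which is exactly product<47:16> of the
 * nominal 48-bit multiply -- the pre-shift by 16 is what lines those bits up.
 */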
|
|
|
|
|
|
|
|
#define DO_SMLAWX(NAME, add, mt) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrrr *a) \
|
|
|
|
{ \
|
|
|
|
return op_smlawx(s, a, add, mt); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_SMLAWX(SMULWB, 0, 0)
|
|
|
|
DO_SMLAWX(SMULWT, 0, 1)
|
|
|
|
DO_SMLAWX(SMLAWB, 1, 0)
|
|
|
|
DO_SMLAWX(SMLAWT, 1, 1)
|
|
|
|
|
|
|
|
#undef DO_SMLAWX
|
|
|
|
|
2019-09-05 03:30:02 +08:00
|
|
|
/*
|
|
|
|
* MSR (immediate) and hints
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_YIELD(DisasContext *s, arg_YIELD *a)
|
|
|
|
{
|
2019-09-05 03:30:50 +08:00
|
|
|
/*
|
|
|
|
* When running single-threaded TCG code, use the helper to ensure that
|
|
|
|
* the next round-robin scheduled vCPU gets a crack. When running in
|
|
|
|
* MTTCG we don't generate jumps to the helper as it won't affect the
|
|
|
|
* scheduling of other vCPUs.
|
|
|
|
*/
|
|
|
|
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
|
|
|
|
gen_set_pc_im(s, s->base.pc_next);
|
|
|
|
s->base.is_jmp = DISAS_YIELD;
|
|
|
|
}
|
2019-09-05 03:30:02 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_WFE(DisasContext *s, arg_WFE *a)
|
|
|
|
{
|
2019-09-05 03:30:50 +08:00
|
|
|
/*
|
|
|
|
* When running single-threaded TCG code, use the helper to ensure that
|
|
|
|
* the next round-robin scheduled vCPU gets a crack. In MTTCG mode we
|
|
|
|
* just skip this instruction. Currently the SEV/SEVL instructions,
|
|
|
|
* which are *one* of many ways to wake the CPU from WFE, are not
|
|
|
|
* implemented so we can't sleep like WFI does.
|
|
|
|
*/
|
|
|
|
if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
|
|
|
|
gen_set_pc_im(s, s->base.pc_next);
|
|
|
|
s->base.is_jmp = DISAS_WFE;
|
|
|
|
}
|
2019-09-05 03:30:02 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_WFI(DisasContext *s, arg_WFI *a)
|
|
|
|
{
|
2019-09-05 03:30:50 +08:00
|
|
|
/* For WFI, halt the vCPU until an IRQ. */
|
|
|
|
gen_set_pc_im(s, s->base.pc_next);
|
|
|
|
s->base.is_jmp = DISAS_WFI;
|
2019-09-05 03:30:02 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_NOP(DisasContext *s, arg_NOP *a)
|
|
|
|
{
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MSR_imm(DisasContext *s, arg_MSR_imm *a)
|
|
|
|
{
|
|
|
|
uint32_t val = ror32(a->imm, a->rot * 2);
|
|
|
|
uint32_t mask = msr_mask(s, a->mask, a->r);
|
|
|
|
|
|
|
|
if (gen_set_psr_im(s, mask, a->r, val)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:04 +08:00
|
|
|
/*
|
|
|
|
* Cyclic Redundancy Check
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_crc32(DisasContext *s, arg_rrr *a, bool c, MemOp sz)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2, t3;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_crc32, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
switch (sz) {
|
|
|
|
case MO_8:
|
|
|
|
gen_uxtb(t2);
|
|
|
|
break;
|
|
|
|
case MO_16:
|
|
|
|
gen_uxth(t2);
|
|
|
|
break;
|
|
|
|
case MO_32:
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
t3 = tcg_const_i32(1 << sz);
|
|
|
|
if (c) {
|
|
|
|
gen_helper_crc32c(t1, t1, t2, t3);
|
|
|
|
} else {
|
|
|
|
gen_helper_crc32(t1, t1, t2, t3);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
tcg_temp_free_i32(t3);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
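
/*
 * Note: the third operand passed to the helpers is the operand width in
 * bytes (1 << sz).  Architecturally CRC32x uses the polynomial 0x04C11DB7
 * and CRC32Cx the Castagnoli polynomial 0x1EDC6F41, both in bit-reflected
 * form; that distinction is handled inside the two helpers.
 */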
|
|
|
|
|
|
|
|
#define DO_CRC32(NAME, c, sz) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
|
|
|
|
{ return op_crc32(s, a, c, sz); }
|
|
|
|
|
|
|
|
DO_CRC32(CRC32B, false, MO_8)
|
|
|
|
DO_CRC32(CRC32H, false, MO_16)
|
|
|
|
DO_CRC32(CRC32W, false, MO_32)
|
|
|
|
DO_CRC32(CRC32CB, true, MO_8)
|
|
|
|
DO_CRC32(CRC32CH, true, MO_16)
|
|
|
|
DO_CRC32(CRC32CW, true, MO_32)
|
|
|
|
|
|
|
|
#undef DO_CRC32
|
|
|
|
|
2019-09-05 03:30:03 +08:00
|
|
|
/*
|
|
|
|
* Miscellaneous instructions
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_MRS_bank(DisasContext *s, arg_MRS_bank *a)
|
|
|
|
{
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
gen_mrs_banked(s, a->r, a->sysm, a->rd);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MSR_bank(DisasContext *s, arg_MSR_bank *a)
|
|
|
|
{
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
gen_msr_banked(s, a->r, a->sysm, a->rn);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MRS_reg(DisasContext *s, arg_MRS_reg *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->r) {
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
tmp = load_cpu_field(spsr);
|
|
|
|
} else {
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_helper_cpsr_read(tmp, cpu_env);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MSR_reg(DisasContext *s, arg_MSR_reg *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
uint32_t mask = msr_mask(s, a->mask, a->r);
|
|
|
|
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
if (gen_set_psr(s, mask, a->r, tmp)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MRS_v7m(DisasContext *s, arg_MRS_v7m *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tmp = tcg_const_i32(a->sysm);
|
|
|
|
gen_helper_v7m_mrs(tmp, cpu_env, tmp);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_MSR_v7m(DisasContext *s, arg_MSR_v7m *a)
|
|
|
|
{
TCGv_i32 addr, reg;
|
2019-09-05 03:30:03 +08:00
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
addr = tcg_const_i32((a->mask << 10) | a->sysm);
|
|
|
|
reg = load_reg(s, a->rn);
|
|
|
|
gen_helper_v7m_msr(cpu_env, addr, reg);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
tcg_temp_free_i32(reg);
|
2020-03-04 01:49:49 +08:00
|
|
|
/* If we wrote to CONTROL, the EL might have changed */
|
|
|
|
gen_helper_rebuild_hflags_m32_newel(cpu_env);
|
2019-09-05 03:30:03 +08:00
|
|
|
gen_lookup_tb(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:05 +08:00
|
|
|
static bool trans_BX(DisasContext *s, arg_BX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_4T) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:43 +08:00
|
|
|
gen_bx_excret(s, load_reg(s, a->rm));
|
2019-09-05 03:30:05 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BXJ(DisasContext *s, arg_BXJ *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_5J || arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* Trivial implementation equivalent to bx. */
|
|
|
|
gen_bx(s, load_reg(s, a->rm));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BLX_r(DisasContext *s, arg_BLX_r *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_5) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
|
|
|
|
gen_bx(s, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:43 +08:00
|
|
|
/*
|
|
|
|
* BXNS/BLXNS: only exist for v8M with the security extensions,
|
|
|
|
* and always UNDEF if NonSecure. We don't implement these in
|
|
|
|
* the user-only mode either (in theory you can use them from
|
|
|
|
* Secure User mode but they are too tied in to system emulation).
|
|
|
|
*/
|
|
|
|
static bool trans_BXNS(DisasContext *s, arg_BXNS *a)
|
|
|
|
{
|
|
|
|
if (!s->v8m_secure || IS_USER_ONLY) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
} else {
|
|
|
|
gen_bxns(s, a->rm);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BLXNS(DisasContext *s, arg_BLXNS *a)
|
|
|
|
{
|
|
|
|
if (!s->v8m_secure || IS_USER_ONLY) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
} else {
|
|
|
|
gen_blxns(s, a->rm);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:06 +08:00
|
|
|
static bool trans_CLZ(DisasContext *s, arg_CLZ *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_5) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
tcg_gen_clzi_i32(tmp, tmp, 32);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:07 +08:00
|
|
|
static bool trans_ERET(DisasContext *s, arg_ERET *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_V7VE)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (s->current_el == 2) {
|
|
|
|
/* ERET from Hyp uses ELR_Hyp, not LR */
|
|
|
|
tmp = load_cpu_field(elr_el[2]);
|
|
|
|
} else {
|
|
|
|
tmp = load_reg(s, 14);
|
|
|
|
}
|
|
|
|
gen_exception_return(s, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:08 +08:00
|
|
|
static bool trans_HLT(DisasContext *s, arg_HLT *a)
|
|
|
|
{
|
|
|
|
gen_hlt(s, a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BKPT(DisasContext *s, arg_BKPT *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_5) {
|
|
|
|
return false;
|
|
|
|
}
if (arm_dc_feature(s, ARM_FEATURE_M) &&
|
|
|
|
semihosting_enabled() &&
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
!IS_USER(s) &&
|
|
|
|
#endif
|
|
|
|
(a->imm == 0xab)) {
|
2019-12-17 23:08:57 +08:00
|
|
|
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
|
2019-09-19 21:18:40 +08:00
|
|
|
} else {
|
|
|
|
gen_exception_bkpt_insn(s, syn_aa32_bkpt(a->imm, false));
|
|
|
|
}
|
2019-09-05 03:30:08 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_HVC(DisasContext *s, arg_HVC *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_7 || arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
} else {
|
|
|
|
gen_hvc(s, a->imm);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMC(DisasContext *s, arg_SMC *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6K || arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
} else {
|
|
|
|
gen_smc(s);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:31 +08:00
|
|
|
static bool trans_SG(DisasContext *s, arg_SG *a)
|
|
|
|
{
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M) ||
|
|
|
|
!arm_dc_feature(s, ARM_FEATURE_V8)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* SG (v8M only)
|
|
|
|
* The bulk of the behaviour for this instruction is implemented
|
|
|
|
* in v7m_handle_execute_nsc(), which deals with the insn when
|
|
|
|
* it is executed by a CPU in non-secure state from memory
|
|
|
|
* which is Secure & NonSecure-Callable.
|
|
|
|
* Here we only need to handle the remaining cases:
|
|
|
|
* * in NS memory (including the "security extension not
|
|
|
|
* implemented" case) : NOP
|
|
|
|
* * in S memory but CPU already secure (clear IT bits)
|
|
|
|
* We know that the attribute for the memory this insn is
|
|
|
|
* in must match the current CPU state, because otherwise
|
|
|
|
* get_phys_addr_pmsav8 would have generated an exception.
|
|
|
|
*/
|
|
|
|
if (s->v8m_secure) {
|
|
|
|
/* Like the IT insn, we don't need to generate any code */
|
|
|
|
s->condexec_cond = 0;
|
|
|
|
s->condexec_mask = 0;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:32 +08:00
|
|
|
static bool trans_TT(DisasContext *s, arg_TT *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M) ||
|
|
|
|
!arm_dc_feature(s, ARM_FEATURE_V8)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rd == 13 || a->rd == 15 || a->rn == 15) {
|
|
|
|
/* We UNDEF for these UNPREDICTABLE cases */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (a->A && !s->v8m_secure) {
|
|
|
|
/* This case is UNDEFINED. */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = load_reg(s, a->rn);
|
|
|
|
tmp = tcg_const_i32((a->A << 1) | a->T);
|
|
|
|
gen_helper_v7m_tt(tmp, cpu_env, addr, tmp);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:10 +08:00
|
|
|
/*
|
|
|
|
* Load/store register index
|
|
|
|
*/
|
|
|
|
|
|
|
|
static ISSInfo make_issinfo(DisasContext *s, int rd, bool p, bool w)
|
|
|
|
{
|
|
|
|
ISSInfo ret;
|
|
|
|
|
|
|
|
/* ISS not valid if writeback */
|
|
|
|
if (p && !w) {
|
|
|
|
ret = rd;
|
2020-01-17 22:09:31 +08:00
|
|
|
if (s->base.pc_next - s->pc_curr == 2) {
|
|
|
|
ret |= ISSIs16Bit;
|
|
|
|
}
|
2019-09-05 03:30:10 +08:00
|
|
|
} else {
|
|
|
|
ret = ISSInvalid;
|
|
|
|
}
|
|
|
|
return ret;
|
|
|
|
}
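
/*
 * For example, a 16-bit LDR without writeback reports its Rt plus
 * ISSIs16Bit here, giving the syndrome enough detail for a hypervisor to
 * emulate the access on a data abort; any writeback or post-indexed form
 * reports ISSInvalid instead.
 */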
|
|
|
|
|
|
|
|
static TCGv_i32 op_addr_rr_pre(DisasContext *s, arg_ldst_rr *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr = load_reg(s, a->rn);
|
|
|
|
|
|
|
|
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (a->p) {
|
|
|
|
TCGv_i32 ofs = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
|
|
|
|
if (a->u) {
|
|
|
|
tcg_gen_add_i32(addr, addr, ofs);
|
|
|
|
} else {
|
|
|
|
tcg_gen_sub_i32(addr, addr, ofs);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(ofs);
|
|
|
|
}
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void op_addr_rr_post(DisasContext *s, arg_ldst_rr *a,
|
|
|
|
TCGv_i32 addr, int address_offset)
|
|
|
|
{
|
|
|
|
if (!a->p) {
|
|
|
|
TCGv_i32 ofs = load_reg(s, a->rm);
|
|
|
|
gen_arm_shift_im(ofs, a->shtype, a->shimm, 0);
|
|
|
|
if (a->u) {
|
|
|
|
tcg_gen_add_i32(addr, addr, ofs);
|
|
|
|
} else {
|
|
|
|
tcg_gen_sub_i32(addr, addr, ofs);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(ofs);
|
|
|
|
} else if (!a->w) {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
tcg_gen_addi_i32(addr, addr, address_offset);
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_load_rr(DisasContext *s, arg_ldst_rr *a,
|
|
|
|
MemOp mop, int mem_idx)
|
|
|
|
{
|
|
|
|
ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_rr_pre(s, a);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, issinfo);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform base writeback before the loaded value to
|
|
|
|
* ensure correct behavior with overlapping index registers.
|
|
|
|
*/
|
|
|
|
op_addr_rr_post(s, a, addr, 0);
|
|
|
|
store_reg_from_load(s, a->rt, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_store_rr(DisasContext *s, arg_ldst_rr *a,
|
|
|
|
MemOp mop, int mem_idx)
|
|
|
|
{
|
|
|
|
ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_rr_pre(s, a);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, issinfo);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
op_addr_rr_post(s, a, addr, 0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDRD_rr(DisasContext *s, arg_ldst_rr *a)
|
|
|
|
{
|
|
|
|
int mem_idx = get_mem_index(s);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_5TE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
addr = op_addr_rr_pre(s, a);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
store_reg(s, a->rt, tmp);
|
|
|
|
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
store_reg(s, a->rt + 1, tmp);
|
|
|
|
|
|
|
|
/* LDRD w/ base writeback is undefined if the registers overlap. */
|
|
|
|
op_addr_rr_post(s, a, addr, -4);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STRD_rr(DisasContext *s, arg_ldst_rr *a)
|
|
|
|
{
|
|
|
|
int mem_idx = get_mem_index(s);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_5TE) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
addr = op_addr_rr_pre(s, a);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt + 1);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
op_addr_rr_post(s, a, addr, -4);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Load/store immediate index
|
|
|
|
*/
|
|
|
|
|
|
|
|
static TCGv_i32 op_addr_ri_pre(DisasContext *s, arg_ldst_ri *a)
|
|
|
|
{
|
|
|
|
int ofs = a->imm;
|
|
|
|
|
|
|
|
if (!a->u) {
|
|
|
|
ofs = -ofs;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
|
|
|
|
/*
|
|
|
|
* Stackcheck. Here we know 'addr' is the current SP;
|
|
|
|
* U is set if we're moving SP up, else down. It is
|
|
|
|
* UNKNOWN whether the limit check triggers when SP starts
|
|
|
|
* below the limit and ends up above it; we chose to do so.
|
|
|
|
*/
|
|
|
|
if (!a->u) {
|
|
|
|
TCGv_i32 newsp = tcg_temp_new_i32();
|
|
|
|
tcg_gen_addi_i32(newsp, cpu_R[13], ofs);
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, newsp);
|
|
|
|
tcg_temp_free_i32(newsp);
|
|
|
|
} else {
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, cpu_R[13]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return add_reg_for_lit(s, a->rn, a->p ? ofs : 0);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void op_addr_ri_post(DisasContext *s, arg_ldst_ri *a,
|
|
|
|
TCGv_i32 addr, int address_offset)
|
|
|
|
{
|
|
|
|
if (!a->p) {
|
|
|
|
if (a->u) {
|
|
|
|
address_offset += a->imm;
|
|
|
|
} else {
|
|
|
|
address_offset -= a->imm;
|
|
|
|
}
|
|
|
|
} else if (!a->w) {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
tcg_gen_addi_i32(addr, addr, address_offset);
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_load_ri(DisasContext *s, arg_ldst_ri *a,
|
|
|
|
MemOp mop, int mem_idx)
|
|
|
|
{
|
|
|
|
ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_ri_pre(s, a);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, issinfo);
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Perform base writeback before the loaded value to
|
|
|
|
* ensure correct behavior with overlapping index registers.
|
|
|
|
*/
|
|
|
|
op_addr_ri_post(s, a, addr, 0);
|
|
|
|
store_reg_from_load(s, a->rt, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_store_ri(DisasContext *s, arg_ldst_ri *a,
|
|
|
|
MemOp mop, int mem_idx)
|
|
|
|
{
|
|
|
|
ISSInfo issinfo = make_issinfo(s, a->rt, a->p, a->w) | ISSIsWrite;
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_ri_pre(s, a);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, issinfo);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
op_addr_ri_post(s, a, addr, 0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_ldrd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
|
|
|
|
{
|
|
|
|
int mem_idx = get_mem_index(s);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_ri_pre(s, a);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
store_reg(s, a->rt, tmp);
|
|
|
|
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
store_reg(s, rt2, tmp);
|
|
|
|
|
|
|
|
/* LDRD w/ base writeback is undefined if the registers overlap. */
|
|
|
|
op_addr_ri_post(s, a, addr, -4);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrd_ri(s, a, a->rt + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
|
|
|
|
{
|
|
|
|
arg_ldst_ri b = {
|
|
|
|
.u = a->u, .w = a->w, .p = a->p,
|
|
|
|
.rn = a->rn, .rt = a->rt, .imm = a->imm
|
|
|
|
};
|
|
|
|
return op_ldrd_ri(s, &b, a->rt2);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_strd_ri(DisasContext *s, arg_ldst_ri *a, int rt2)
|
|
|
|
{
|
|
|
|
int mem_idx = get_mem_index(s);
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
addr = op_addr_ri_pre(s, a);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
|
|
|
|
tmp = load_reg(s, rt2);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, mem_idx, MO_UL | s->be_data);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
op_addr_ri_post(s, a, addr, -4);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STRD_ri_a32(DisasContext *s, arg_ldst_ri *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_5TE || (a->rt & 1)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strd_ri(s, a, a->rt + 1);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STRD_ri_t32(DisasContext *s, arg_ldst_ri2 *a)
|
|
|
|
{
|
|
|
|
arg_ldst_ri b = {
|
|
|
|
.u = a->u, .w = a->w, .p = a->p,
|
|
|
|
.rn = a->rn, .rt = a->rt, .imm = a->imm
|
|
|
|
};
|
|
|
|
return op_strd_ri(s, &b, a->rt2);
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_LDST(NAME, WHICH, MEMOP) \
|
|
|
|
static bool trans_##NAME##_ri(DisasContext *s, arg_ldst_ri *a) \
|
|
|
|
{ \
|
|
|
|
return op_##WHICH##_ri(s, a, MEMOP, get_mem_index(s)); \
|
|
|
|
} \
|
|
|
|
static bool trans_##NAME##T_ri(DisasContext *s, arg_ldst_ri *a) \
|
|
|
|
{ \
|
|
|
|
return op_##WHICH##_ri(s, a, MEMOP, get_a32_user_mem_index(s)); \
|
|
|
|
} \
|
|
|
|
static bool trans_##NAME##_rr(DisasContext *s, arg_ldst_rr *a) \
|
|
|
|
{ \
|
|
|
|
return op_##WHICH##_rr(s, a, MEMOP, get_mem_index(s)); \
|
|
|
|
} \
|
|
|
|
static bool trans_##NAME##T_rr(DisasContext *s, arg_ldst_rr *a) \
|
|
|
|
{ \
|
|
|
|
return op_##WHICH##_rr(s, a, MEMOP, get_a32_user_mem_index(s)); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_LDST(LDR, load, MO_UL)
|
|
|
|
DO_LDST(LDRB, load, MO_UB)
|
|
|
|
DO_LDST(LDRH, load, MO_UW)
|
|
|
|
DO_LDST(LDRSB, load, MO_SB)
|
|
|
|
DO_LDST(LDRSH, load, MO_SW)
|
|
|
|
|
|
|
|
DO_LDST(STR, store, MO_UL)
|
|
|
|
DO_LDST(STRB, store, MO_UB)
|
|
|
|
DO_LDST(STRH, store, MO_UW)
|
|
|
|
|
|
|
|
#undef DO_LDST
|
|
|
|
|
2019-09-05 03:30:11 +08:00
|
|
|
/*
|
|
|
|
* Synchronization primitives
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_swp(DisasContext *s, arg_SWP *a, MemOp opc)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
TCGv taddr;
|
|
|
|
|
|
|
|
opc |= s->be_data;
|
|
|
|
addr = load_reg(s, a->rn);
|
|
|
|
taddr = gen_aa32_addr(s, addr, opc);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rt2);
|
|
|
|
tcg_gen_atomic_xchg_i32(tmp, taddr, tmp, get_mem_index(s), opc);
|
|
|
|
tcg_temp_free(taddr);
|
|
|
|
|
|
|
|
store_reg(s, a->rt, tmp);
|
|
|
|
return true;
|
|
|
|
}
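
/*
 * For example, "SWP r0, r1, [r2]" loads the word at [r2] into r0 and stores
 * r1 back to [r2] as a single atomic exchange, which is why both SWP and
 * SWPB reduce to one tcg_gen_atomic_xchg_i32() above.
 */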
|
|
|
|
|
|
|
|
static bool trans_SWP(DisasContext *s, arg_SWP *a)
|
|
|
|
{
|
|
|
|
return op_swp(s, a, MO_UL | MO_ALIGN);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SWPB(DisasContext *s, arg_SWP *a)
|
|
|
|
{
|
|
|
|
return op_swp(s, a, MO_UB);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Load/Store Exclusive and Load-Acquire/Store-Release
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_strex(DisasContext *s, arg_STREX *a, MemOp mop, bool rel)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr;
|
2019-11-19 21:20:28 +08:00
|
|
|
/* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
|
|
|
|
bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
|
2019-09-05 03:30:11 +08:00
|
|
|
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
|
|
|
if (a->rd == 15 || a->rn == 15 || a->rt == 15
|
|
|
|
|| a->rd == a->rn || a->rd == a->rt
|
2019-11-19 21:20:28 +08:00
|
|
|
|| (!v8a && s->thumb && (a->rd == 13 || a->rt == 13))
|
2019-09-05 03:30:12 +08:00
|
|
|
|| (mop == MO_64
|
|
|
|
&& (a->rt2 == 15
|
2019-11-19 21:20:28 +08:00
|
|
|
|| a->rd == a->rt2
|
2019-11-19 21:20:28 +08:00
|
|
|
|| (!v8a && s->thumb && a->rt2 == 13)))) {
|
2019-09-05 03:30:12 +08:00
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:11 +08:00
|
|
|
if (rel) {
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = tcg_temp_local_new_i32();
|
|
|
|
load_reg_var(s, addr, a->rn);
|
|
|
|
tcg_gen_addi_i32(addr, addr, a->imm);
|
|
|
|
|
|
|
|
gen_store_exclusive(s, a->rd, a->rt, a->rt2, addr, mop);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STREX(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_32, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STREXD_a32(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
2019-09-05 03:30:11 +08:00
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
a->rt2 = a->rt + 1;
|
|
|
|
return op_strex(s, a, MO_64, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STREXD_t32(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
return op_strex(s, a, MO_64, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STREXB(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_8, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STREXH(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_16, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLEX(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_32, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLEXD_a32(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
2019-09-05 03:30:11 +08:00
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
a->rt2 = a->rt + 1;
|
|
|
|
return op_strex(s, a, MO_64, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLEXD_t32(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_64, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLEXB(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_8, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLEXH(DisasContext *s, arg_STREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_strex(s, a, MO_16, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_stl(DisasContext *s, arg_STL *a, MemOp mop)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
|
|
|
if (a->rn == 15 || a->rt == 15) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:11 +08:00
|
|
|
|
2019-09-05 03:30:12 +08:00
|
|
|
addr = load_reg(s, a->rn);
|
2019-09-05 03:30:11 +08:00
|
|
|
tmp = load_reg(s, a->rt);
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
|
|
|
|
gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel | ISSIsWrite);
|
|
|
|
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STL(DisasContext *s, arg_STL *a)
|
|
|
|
{
|
|
|
|
return op_stl(s, a, MO_UL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLB(DisasContext *s, arg_STL *a)
|
|
|
|
{
|
|
|
|
return op_stl(s, a, MO_UB);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STLH(DisasContext *s, arg_STL *a)
|
|
|
|
{
|
|
|
|
return op_stl(s, a, MO_UW);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_ldrex(DisasContext *s, arg_LDREX *a, MemOp mop, bool acq)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr;
|
2019-11-19 21:20:28 +08:00
|
|
|
/* Some cases stopped being UNPREDICTABLE in v8A (but not v8M) */
|
|
|
|
bool v8a = ENABLE_ARCH_8 && !arm_dc_feature(s, ARM_FEATURE_M);
|
2019-09-05 03:30:11 +08:00
|
|
|
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
|
|
|
if (a->rn == 15 || a->rt == 15
|
2019-11-19 21:20:28 +08:00
|
|
|
|| (!v8a && s->thumb && a->rt == 13)
|
2019-09-05 03:30:12 +08:00
|
|
|
|| (mop == MO_64
|
|
|
|
&& (a->rt2 == 15 || a->rt == a->rt2
|
2019-11-19 21:20:28 +08:00
|
|
|
|| (!v8a && s->thumb && a->rt2 == 13)))) {
|
2019-09-05 03:30:12 +08:00
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:11 +08:00
|
|
|
addr = tcg_temp_local_new_i32();
|
|
|
|
load_reg_var(s, addr, a->rn);
|
|
|
|
tcg_gen_addi_i32(addr, addr, a->imm);
|
|
|
|
|
|
|
|
gen_load_exclusive(s, a->rt, a->rt2, addr, mop);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
|
|
|
|
if (acq) {
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDREX(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_32, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDREXD_a32(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
2019-09-05 03:30:11 +08:00
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
a->rt2 = a->rt + 1;
|
|
|
|
return op_ldrex(s, a, MO_64, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDREXD_t32(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
return op_ldrex(s, a, MO_64, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDREXB(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_8, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDREXH(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (s->thumb ? !ENABLE_ARCH_7 : !ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_16, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAEX(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_32, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAEXD_a32(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
2019-09-05 03:30:11 +08:00
|
|
|
if (a->rt & 1) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
a->rt2 = a->rt + 1;
|
|
|
|
return op_ldrex(s, a, MO_64, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAEXD_t32(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_64, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAEXB(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_8, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAEXH(DisasContext *s, arg_LDREX *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_ldrex(s, a, MO_16, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_lda(DisasContext *s, arg_LDA *a, MemOp mop)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_8) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-09-05 03:30:12 +08:00
|
|
|
/* We UNDEF for these UNPREDICTABLE cases. */
|
|
|
|
if (a->rn == 15 || a->rt == 15) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:11 +08:00
|
|
|
|
2019-09-05 03:30:12 +08:00
|
|
|
addr = load_reg(s, a->rn);
|
2019-09-05 03:30:11 +08:00
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), mop | s->be_data);
|
|
|
|
disas_set_da_iss(s, mop, a->rt | ISSIsAcqRel);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
|
|
|
|
store_reg(s, a->rt, tmp);
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDA(DisasContext *s, arg_LDA *a)
|
|
|
|
{
|
|
|
|
return op_lda(s, a, MO_UL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAB(DisasContext *s, arg_LDA *a)
|
|
|
|
{
|
|
|
|
return op_lda(s, a, MO_UB);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDAH(DisasContext *s, arg_LDA *a)
|
|
|
|
{
|
|
|
|
return op_lda(s, a, MO_UW);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:13 +08:00
|
|
|
/*
|
|
|
|
* Media instructions
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_USADA8(DisasContext *s, arg_USADA8 *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
gen_helper_usad8(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
if (a->ra != 15) {
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
tcg_gen_add_i32(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_bfx(DisasContext *s, arg_UBFX *a, bool u)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
int width = a->widthm1 + 1;
|
|
|
|
int shift = a->lsb;
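/* a->widthm1 encodes the field width minus one, so width is in [1, 32]. */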
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (shift + width > 32) {
|
|
|
|
/* UNPREDICTABLE; we choose to UNDEF */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
if (u) {
|
|
|
|
tcg_gen_extract_i32(tmp, tmp, shift, width);
|
|
|
|
} else {
|
|
|
|
tcg_gen_sextract_i32(tmp, tmp, shift, width);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SBFX(DisasContext *s, arg_SBFX *a)
|
|
|
|
{
|
|
|
|
return op_bfx(s, a, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UBFX(DisasContext *s, arg_UBFX *a)
|
|
|
|
{
|
|
|
|
return op_bfx(s, a, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BFCI(DisasContext *s, arg_BFCI *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
int msb = a->msb, lsb = a->lsb;
|
|
|
|
int width;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (msb < lsb) {
|
|
|
|
/* UNPREDICTABLE; we choose to UNDEF */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
width = msb + 1 - lsb;
|
|
|
|
if (a->rn == 15) {
|
|
|
|
/* BFC */
|
|
|
|
tmp = tcg_const_i32(0);
|
|
|
|
} else {
|
|
|
|
/* BFI */
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
}
|
|
|
|
if (width != 32) {
|
|
|
|
TCGv_i32 tmp2 = load_reg(s, a->rd);
|
|
|
|
tcg_gen_deposit_i32(tmp, tmp2, tmp, lsb, width);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UDF(DisasContext *s, arg_UDF *a)
|
|
|
|
{
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:14 +08:00
|
|
|
/*
|
|
|
|
* Parallel addition and subtraction
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_par_addsub(DisasContext *s, arg_rrr *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32))
|
|
|
|
{
|
|
|
|
TCGv_i32 t0, t1;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rn);
|
|
|
|
t1 = load_reg(s, a->rm);
|
|
|
|
|
|
|
|
gen(t0, t0, t1);
|
|
|
|
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
store_reg(s, a->rd, t0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_par_addsub_ge(DisasContext *s, arg_rrr *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32,
|
|
|
|
TCGv_i32, TCGv_ptr))
|
|
|
|
{
|
|
|
|
TCGv_i32 t0, t1;
|
|
|
|
TCGv_ptr ge;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t0 = load_reg(s, a->rn);
|
|
|
|
t1 = load_reg(s, a->rm);
|
|
|
|
|
|
|
|
ge = tcg_temp_new_ptr();
|
|
|
|
tcg_gen_addi_ptr(ge, cpu_env, offsetof(CPUARMState, GE));
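/* ge points at env->GE: these helpers update the CPSR.GE flags as well as producing the packed result. */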
|
|
|
|
gen(t0, t0, t1, ge);
|
|
|
|
|
|
|
|
tcg_temp_free_ptr(ge);
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
store_reg(s, a->rd, t0);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_PAR_ADDSUB(NAME, helper) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
|
|
|
|
{ \
|
|
|
|
return op_par_addsub(s, a, helper); \
|
|
|
|
}
|
|
|
|
|
|
|
|
#define DO_PAR_ADDSUB_GE(NAME, helper) \
|
|
|
|
static bool trans_##NAME(DisasContext *s, arg_rrr *a) \
|
|
|
|
{ \
|
|
|
|
return op_par_addsub_ge(s, a, helper); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB_GE(SADD16, gen_helper_sadd16)
|
|
|
|
DO_PAR_ADDSUB_GE(SASX, gen_helper_saddsubx)
|
|
|
|
DO_PAR_ADDSUB_GE(SSAX, gen_helper_ssubaddx)
|
|
|
|
DO_PAR_ADDSUB_GE(SSUB16, gen_helper_ssub16)
|
|
|
|
DO_PAR_ADDSUB_GE(SADD8, gen_helper_sadd8)
|
|
|
|
DO_PAR_ADDSUB_GE(SSUB8, gen_helper_ssub8)
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB_GE(UADD16, gen_helper_uadd16)
|
|
|
|
DO_PAR_ADDSUB_GE(UASX, gen_helper_uaddsubx)
|
|
|
|
DO_PAR_ADDSUB_GE(USAX, gen_helper_usubaddx)
|
|
|
|
DO_PAR_ADDSUB_GE(USUB16, gen_helper_usub16)
|
|
|
|
DO_PAR_ADDSUB_GE(UADD8, gen_helper_uadd8)
|
|
|
|
DO_PAR_ADDSUB_GE(USUB8, gen_helper_usub8)
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB(QADD16, gen_helper_qadd16)
|
|
|
|
DO_PAR_ADDSUB(QASX, gen_helper_qaddsubx)
|
|
|
|
DO_PAR_ADDSUB(QSAX, gen_helper_qsubaddx)
|
|
|
|
DO_PAR_ADDSUB(QSUB16, gen_helper_qsub16)
|
|
|
|
DO_PAR_ADDSUB(QADD8, gen_helper_qadd8)
|
|
|
|
DO_PAR_ADDSUB(QSUB8, gen_helper_qsub8)
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB(UQADD16, gen_helper_uqadd16)
|
|
|
|
DO_PAR_ADDSUB(UQASX, gen_helper_uqaddsubx)
|
|
|
|
DO_PAR_ADDSUB(UQSAX, gen_helper_uqsubaddx)
|
|
|
|
DO_PAR_ADDSUB(UQSUB16, gen_helper_uqsub16)
|
|
|
|
DO_PAR_ADDSUB(UQADD8, gen_helper_uqadd8)
|
|
|
|
DO_PAR_ADDSUB(UQSUB8, gen_helper_uqsub8)
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB(SHADD16, gen_helper_shadd16)
|
|
|
|
DO_PAR_ADDSUB(SHASX, gen_helper_shaddsubx)
|
|
|
|
DO_PAR_ADDSUB(SHSAX, gen_helper_shsubaddx)
|
|
|
|
DO_PAR_ADDSUB(SHSUB16, gen_helper_shsub16)
|
|
|
|
DO_PAR_ADDSUB(SHADD8, gen_helper_shadd8)
|
|
|
|
DO_PAR_ADDSUB(SHSUB8, gen_helper_shsub8)
|
|
|
|
|
|
|
|
DO_PAR_ADDSUB(UHADD16, gen_helper_uhadd16)
|
|
|
|
DO_PAR_ADDSUB(UHASX, gen_helper_uhaddsubx)
|
|
|
|
DO_PAR_ADDSUB(UHSAX, gen_helper_uhsubaddx)
|
|
|
|
DO_PAR_ADDSUB(UHSUB16, gen_helper_uhsub16)
|
|
|
|
DO_PAR_ADDSUB(UHADD8, gen_helper_uhadd8)
|
|
|
|
DO_PAR_ADDSUB(UHSUB8, gen_helper_uhsub8)
|
|
|
|
|
|
|
|
#undef DO_PAR_ADDSUB
|
|
|
|
#undef DO_PAR_ADDSUB_GE
|
|
|
|
|
2019-09-05 03:30:15 +08:00
|
|
|
/*
|
|
|
|
* Packing, unpacking, saturation, and reversal
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_PKH(DisasContext *s, arg_PKH *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tn, tm;
|
|
|
|
int shift = a->imm;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tn = load_reg(s, a->rn);
|
|
|
|
tm = load_reg(s, a->rm);
|
|
|
|
if (a->tb) {
|
|
|
|
/* PKHTB */
|
|
|
|
if (shift == 0) {
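/* An immediate of 0 encodes ASR #32; ASR #31 gives the same value and keeps the TCG shift amount in range. */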
|
|
|
|
shift = 31;
|
|
|
|
}
|
|
|
|
tcg_gen_sari_i32(tm, tm, shift);
|
|
|
|
tcg_gen_deposit_i32(tn, tn, tm, 0, 16);
|
|
|
|
} else {
|
|
|
|
/* PKHBT */
|
|
|
|
tcg_gen_shli_i32(tm, tm, shift);
|
|
|
|
tcg_gen_deposit_i32(tn, tm, tn, 0, 16);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(tm);
|
|
|
|
store_reg(s, a->rd, tn);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_sat(DisasContext *s, arg_sat *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp, satimm;
|
|
|
|
int shift = a->imm;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
if (a->sh) {
|
|
|
|
tcg_gen_sari_i32(tmp, tmp, shift ? shift : 31);
|
|
|
|
} else {
|
|
|
|
tcg_gen_shli_i32(tmp, tmp, shift);
|
|
|
|
}
|
|
|
|
|
|
|
|
satimm = tcg_const_i32(a->satimm);
|
|
|
|
gen(tmp, cpu_env, tmp, satimm);
|
|
|
|
tcg_temp_free_i32(satimm);
|
|
|
|
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SSAT(DisasContext *s, arg_sat *a)
|
|
|
|
{
|
|
|
|
return op_sat(s, a, gen_helper_ssat);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_USAT(DisasContext *s, arg_sat *a)
|
|
|
|
{
|
|
|
|
return op_sat(s, a, gen_helper_usat);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SSAT16(DisasContext *s, arg_sat *a)
|
|
|
|
{
|
|
|
|
if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_sat(s, a, gen_helper_ssat16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_USAT16(DisasContext *s, arg_sat *a)
|
|
|
|
{
|
|
|
|
if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_sat(s, a, gen_helper_usat16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_xta(DisasContext *s, arg_rrr_rot *a,
|
|
|
|
void (*gen_extract)(TCGv_i32, TCGv_i32),
|
|
|
|
void (*gen_add)(TCGv_i32, TCGv_i32, TCGv_i32))
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
/*
|
|
|
|
* TODO: In many cases we could do a shift instead of a rotate.
|
|
|
|
* Combined with a simple extend, that becomes an extract.
|
|
|
|
*/
|
|
|
|
tcg_gen_rotri_i32(tmp, tmp, a->rot * 8);
|
|
|
|
gen_extract(tmp, tmp);
|
|
|
|
|
|
|
|
if (a->rn != 15) {
|
|
|
|
TCGv_i32 tmp2 = load_reg(s, a->rn);
|
|
|
|
gen_add(tmp, tmp, tmp2);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SXTAB(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
return op_xta(s, a, tcg_gen_ext8s_i32, tcg_gen_add_i32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SXTAH(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
return op_xta(s, a, tcg_gen_ext16s_i32, tcg_gen_add_i32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SXTAB16(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_xta(s, a, gen_helper_sxtb16, gen_add16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UXTAB(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
return op_xta(s, a, tcg_gen_ext8u_i32, tcg_gen_add_i32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UXTAH(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
return op_xta(s, a, tcg_gen_ext16u_i32, tcg_gen_add_i32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UXTAB16(DisasContext *s, arg_rrr_rot *a)
|
|
|
|
{
|
|
|
|
if (s->thumb && !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_xta(s, a, gen_helper_uxtb16, gen_add16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SEL(DisasContext *s, arg_rrr *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2, t3;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
t3 = tcg_temp_new_i32();
|
|
|
|
tcg_gen_ld_i32(t3, cpu_env, offsetof(CPUARMState, GE));
|
|
|
|
gen_helper_sel_flags(t1, t3, t1, t2);
|
|
|
|
tcg_temp_free_i32(t3);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_rr(DisasContext *s, arg_rr *a,
|
|
|
|
void (*gen)(TCGv_i32, TCGv_i32))
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
gen(tmp, tmp);
|
|
|
|
store_reg(s, a->rd, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_REV(DisasContext *s, arg_rr *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_rr(s, a, tcg_gen_bswap32_i32);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_REV16(DisasContext *s, arg_rr *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_rr(s, a, gen_rev16);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_REVSH(DisasContext *s, arg_rr *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_rr(s, a, gen_revsh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_RBIT(DisasContext *s, arg_rr *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6T2) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return op_rr(s, a, gen_helper_rbit);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:16 +08:00
|
|
|
/*
|
|
|
|
* Signed multiply, signed and unsigned divide
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool op_smlad(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
if (m_swap) {
|
2020-06-17 01:08:34 +08:00
|
|
|
gen_swap_half(t2, t2);
|
2019-09-05 03:30:16 +08:00
|
|
|
}
|
|
|
|
gen_smul_dual(t1, t2);
|
|
|
|
|
|
|
|
if (sub) {
|
|
|
|
/*
|
2020-10-09 22:47:12 +08:00
|
|
|
* This subtraction cannot overflow, so we can do a simple
|
|
|
|
* 32-bit subtraction and then a possible 32-bit saturating
|
|
|
|
* addition of Ra.
|
2019-09-05 03:30:16 +08:00
|
|
|
*/
|
2020-10-09 22:47:12 +08:00
|
|
|
tcg_gen_sub_i32(t1, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
2019-09-05 03:30:16 +08:00
|
|
|
|
2020-10-09 22:47:12 +08:00
|
|
|
if (a->ra != 15) {
|
|
|
|
t2 = load_reg(s, a->ra);
|
|
|
|
gen_helper_add_setq(t1, cpu_env, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
}
|
|
|
|
} else if (a->ra == 15) {
|
|
|
|
/* Single saturation-checking addition */
|
2019-09-05 03:30:16 +08:00
|
|
|
gen_helper_add_setq(t1, cpu_env, t1, t2);
|
|
|
|
tcg_temp_free_i32(t2);
|
2020-10-09 22:47:12 +08:00
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* We need to add the products and Ra together and then
|
|
|
|
* determine whether the final result overflowed. Doing
|
|
|
|
* this as two separate add-and-check-overflow steps incorrectly
|
|
|
|
* sets Q for cases like (-32768 * -32768) + (-32768 * -32768) + -1.
|
|
|
|
* Do all the arithmetic at 64-bits and then check for overflow.
|
|
|
|
*/
|
|
|
|
TCGv_i64 p64, q64;
|
|
|
|
TCGv_i32 t3, qf, one;
|
|
|
|
|
|
|
|
p64 = tcg_temp_new_i64();
|
|
|
|
q64 = tcg_temp_new_i64();
|
|
|
|
tcg_gen_ext_i32_i64(p64, t1);
|
|
|
|
tcg_gen_ext_i32_i64(q64, t2);
|
|
|
|
tcg_gen_add_i64(p64, p64, q64);
|
|
|
|
load_reg_var(s, t2, a->ra);
|
|
|
|
tcg_gen_ext_i32_i64(q64, t2);
|
|
|
|
tcg_gen_add_i64(p64, p64, q64);
|
|
|
|
tcg_temp_free_i64(q64);
|
|
|
|
|
|
|
|
tcg_gen_extr_i64_i32(t1, t2, p64);
|
|
|
|
tcg_temp_free_i64(p64);
|
|
|
|
/*
|
|
|
|
* t1 is the low half of the result which goes into Rd.
|
|
|
|
* We have overflow and must set Q if the high half (t2)
|
|
|
|
* is different from the sign-extension of t1.
|
|
|
|
*/
|
|
|
|
t3 = tcg_temp_new_i32();
|
|
|
|
tcg_gen_sari_i32(t3, t1, 31);
|
|
|
|
qf = load_cpu_field(QF);
|
|
|
|
one = tcg_const_i32(1);
|
|
|
|
tcg_gen_movcond_i32(TCG_COND_NE, qf, t2, t3, one, qf);
|
|
|
|
store_cpu_field(qf, QF);
|
|
|
|
tcg_temp_free_i32(one);
|
|
|
|
tcg_temp_free_i32(t3);
|
|
|
|
tcg_temp_free_i32(t2);
|
2019-09-05 03:30:16 +08:00
|
|
|
}
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLAD(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlad(s, a, false, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLADX(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlad(s, a, true, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLSD(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlad(s, a, false, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLSDX(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlad(s, a, true, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_smlald(DisasContext *s, arg_rrrr *a, bool m_swap, bool sub)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
TCGv_i64 l1, l2;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
if (m_swap) {
|
2020-06-17 01:08:34 +08:00
|
|
|
gen_swap_half(t2, t2);
|
2019-09-05 03:30:16 +08:00
|
|
|
}
|
|
|
|
gen_smul_dual(t1, t2);
|
|
|
|
|
|
|
|
l1 = tcg_temp_new_i64();
|
|
|
|
l2 = tcg_temp_new_i64();
|
|
|
|
tcg_gen_ext_i32_i64(l1, t1);
|
|
|
|
tcg_gen_ext_i32_i64(l2, t2);
|
|
|
|
tcg_temp_free_i32(t1);
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
|
|
|
|
if (sub) {
|
|
|
|
tcg_gen_sub_i64(l1, l1, l2);
|
|
|
|
} else {
|
|
|
|
tcg_gen_add_i64(l1, l1, l2);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i64(l2);
|
|
|
|
|
|
|
|
gen_addq(s, l1, a->ra, a->rd);
|
|
|
|
gen_storeq_reg(s, a->ra, a->rd, l1);
|
|
|
|
tcg_temp_free_i64(l1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLALD(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlald(s, a, false, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLALDX(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlald(s, a, true, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLSLD(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlald(s, a, false, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMLSLDX(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smlald(s, a, true, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_smmla(DisasContext *s, arg_rrrr *a, bool round, bool sub)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
if (s->thumb
|
|
|
|
? !arm_dc_feature(s, ARM_FEATURE_THUMB_DSP)
|
|
|
|
: !ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
tcg_gen_muls2_i32(t2, t1, t1, t2);
|
|
|
|
|
|
|
|
if (a->ra != 15) {
|
|
|
|
TCGv_i32 t3 = load_reg(s, a->ra);
|
|
|
|
if (sub) {
|
|
|
|
/*
|
|
|
|
* For SMMLS, we need a 64-bit subtract: a borrow can be caused by
|
|
|
|
* a non-zero multiplicand lowpart, and we need the correct result
|
|
|
|
* lowpart for rounding.
|
|
|
|
*/
|
|
|
|
TCGv_i32 zero = tcg_const_i32(0);
|
|
|
|
tcg_gen_sub2_i32(t2, t1, zero, t3, t2, t1);
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
} else {
|
|
|
|
tcg_gen_add_i32(t1, t1, t3);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(t3);
|
|
|
|
}
|
|
|
|
if (round) {
|
|
|
|
/*
|
|
|
|
* Adding 0x80000000 to the 64-bit quantity means that we have
|
|
|
|
* a carry into the high word when the low word has the msb set.
|
|
|
|
*/
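/* i.e. t1 += (t2 >> 31): fold the msb of the low word into the high word. */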
|
|
|
|
tcg_gen_shri_i32(t2, t2, 31);
|
|
|
|
tcg_gen_add_i32(t1, t1, t2);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMMLA(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smmla(s, a, false, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMMLAR(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smmla(s, a, true, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMMLS(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smmla(s, a, false, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SMMLSR(DisasContext *s, arg_rrrr *a)
|
|
|
|
{
|
|
|
|
return op_smmla(s, a, true, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool op_div(DisasContext *s, arg_rrr *a, bool u)
|
|
|
|
{
|
|
|
|
TCGv_i32 t1, t2;
|
|
|
|
|
|
|
|
if (s->thumb
|
2020-02-15 01:50:56 +08:00
|
|
|
? !dc_isar_feature(aa32_thumb_div, s)
|
|
|
|
: !dc_isar_feature(aa32_arm_div, s)) {
|
2019-09-05 03:30:16 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
t1 = load_reg(s, a->rn);
|
|
|
|
t2 = load_reg(s, a->rm);
|
|
|
|
if (u) {
|
|
|
|
gen_helper_udiv(t1, t1, t2);
|
|
|
|
} else {
|
|
|
|
gen_helper_sdiv(t1, t1, t2);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(t2);
|
|
|
|
store_reg(s, a->rd, t1);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SDIV(DisasContext *s, arg_rrr *a)
|
|
|
|
{
|
|
|
|
return op_div(s, a, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_UDIV(DisasContext *s, arg_rrr *a)
|
|
|
|
{
|
|
|
|
return op_div(s, a, true);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:18 +08:00
|
|
|
/*
|
|
|
|
* Block data transfer
|
|
|
|
*/
|
|
|
|
|
|
|
|
static TCGv_i32 op_addr_block_pre(DisasContext *s, arg_ldst_block *a, int n)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr = load_reg(s, a->rn);
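/*
 * Adjust addr to the lowest address accessed by the block transfer:
 * a->b selects increment/decrement before, a->i selects increment
 * vs decrement, and n is the number of registers in the list.
 */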
|
|
|
|
|
|
|
|
if (a->b) {
|
|
|
|
if (a->i) {
|
|
|
|
/* pre increment */
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
} else {
|
|
|
|
/* pre decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -(n * 4));
|
|
|
|
}
|
|
|
|
} else if (!a->i && n != 1) {
|
|
|
|
/* post decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
|
|
|
|
/*
|
|
|
|
* If the writeback is incrementing SP rather than
|
|
|
|
* decrementing it, and the initial SP is below the
|
|
|
|
* stack limit but the final written-back SP would
|
|
|
|
* be above, then we must not perform any memory
|
|
|
|
* accesses, but it is IMPDEF whether we generate
|
|
|
|
* an exception. We choose to do so in this case.
|
|
|
|
* At this point 'addr' is the lowest address, so
|
|
|
|
* either the original SP (if incrementing) or our
|
|
|
|
* final SP (if decrementing), so that's what we check.
|
|
|
|
*/
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
return addr;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void op_addr_block_post(DisasContext *s, arg_ldst_block *a,
|
|
|
|
TCGv_i32 addr, int n)
|
|
|
|
{
|
|
|
|
if (a->w) {
|
|
|
|
/* write back */
|
|
|
|
if (!a->b) {
|
|
|
|
if (a->i) {
|
|
|
|
/* post increment */
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
} else {
|
|
|
|
/* post decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -(n * 4));
|
|
|
|
}
|
|
|
|
} else if (!a->i && n != 1) {
|
|
|
|
/* pre decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
|
|
|
|
}
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
} else {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:20 +08:00
|
|
|
static bool op_stm(DisasContext *s, arg_ldst_block *a, int min_n)
|
2019-09-05 03:30:18 +08:00
|
|
|
{
|
|
|
|
int i, j, n, list, mem_idx;
|
|
|
|
bool user = a->u;
|
|
|
|
TCGv_i32 addr, tmp, tmp2;
|
|
|
|
|
|
|
|
if (user) {
|
|
|
|
/* STM (user) */
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
/* Only usable in supervisor mode. */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
list = a->list;
|
|
|
|
n = ctpop16(list);
|
2019-09-05 03:30:21 +08:00
|
|
|
if (n < min_n || a->rn == 15) {
|
2019-09-05 03:30:20 +08:00
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:18 +08:00
|
|
|
|
|
|
|
addr = op_addr_block_pre(s, a, n);
|
|
|
|
mem_idx = get_mem_index(s);
|
|
|
|
|
|
|
|
for (i = j = 0; i < 16; i++) {
|
|
|
|
if (!(list & (1 << i))) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (user && i != 15) {
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
tmp2 = tcg_const_i32(i);
|
|
|
|
gen_helper_get_user_reg(tmp, cpu_env, tmp2);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
} else {
|
|
|
|
tmp = load_reg(s, i);
|
|
|
|
}
|
|
|
|
gen_aa32_st32(s, tmp, addr, mem_idx);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
|
|
|
|
/* No need to add after the last transfer. */
|
|
|
|
if (++j != n) {
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
op_addr_block_post(s, a, addr, n);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STM(DisasContext *s, arg_ldst_block *a)
|
|
|
|
{
|
2019-09-05 03:30:20 +08:00
|
|
|
/* BitCount(list) < 1 is UNPREDICTABLE */
|
|
|
|
return op_stm(s, a, 1);
|
2019-09-05 03:30:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_STM_t32(DisasContext *s, arg_ldst_block *a)
|
|
|
|
{
|
|
|
|
/* Writeback register in register list is UNPREDICTABLE for T32. */
|
|
|
|
if (a->w && (a->list & (1 << a->rn))) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:20 +08:00
|
|
|
/* BitCount(list) < 2 is UNPREDICTABLE */
|
|
|
|
return op_stm(s, a, 2);
|
2019-09-05 03:30:18 +08:00
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:20 +08:00
|
|
|
static bool do_ldm(DisasContext *s, arg_ldst_block *a, int min_n)
|
2019-09-05 03:30:18 +08:00
|
|
|
{
|
|
|
|
int i, j, n, list, mem_idx;
|
|
|
|
bool loaded_base;
|
|
|
|
bool user = a->u;
|
|
|
|
bool exc_return = false;
|
|
|
|
TCGv_i32 addr, tmp, tmp2, loaded_var;
|
|
|
|
|
|
|
|
if (user) {
|
|
|
|
/* LDM (user), LDM (exception return) */
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
/* Only usable in supervisor mode. */
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
if (extract32(a->list, 15, 1)) {
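/*
 * PC in the register list means LDM (exception return): registers
 * are loaded for the current mode and CPSR is restored from SPSR.
 */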
|
|
|
|
exc_return = true;
|
|
|
|
user = false;
|
|
|
|
} else {
|
|
|
|
/* LDM (user) does not allow writeback. */
|
|
|
|
if (a->w) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
list = a->list;
|
|
|
|
n = ctpop16(list);
|
2019-09-05 03:30:21 +08:00
|
|
|
if (n < min_n || a->rn == 15) {
|
2019-09-05 03:30:20 +08:00
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:18 +08:00
|
|
|
|
|
|
|
addr = op_addr_block_pre(s, a, n);
|
|
|
|
mem_idx = get_mem_index(s);
|
|
|
|
loaded_base = false;
|
|
|
|
loaded_var = NULL;
|
|
|
|
|
|
|
|
for (i = j = 0; i < 16; i++) {
|
|
|
|
if (!(list & (1 << i))) {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld32u(s, tmp, addr, mem_idx);
|
|
|
|
if (user) {
|
|
|
|
tmp2 = tcg_const_i32(i);
|
|
|
|
gen_helper_set_user_reg(cpu_env, tmp2, tmp);
|
|
|
|
tcg_temp_free_i32(tmp2);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
} else if (i == a->rn) {
|
|
|
|
loaded_var = tmp;
|
|
|
|
loaded_base = true;
|
|
|
|
} else if (i == 15 && exc_return) {
|
|
|
|
store_pc_exc_ret(s, tmp);
|
|
|
|
} else {
|
|
|
|
store_reg_from_load(s, i, tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* No need to add after the last transfer. */
|
|
|
|
if (++j != n) {
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
op_addr_block_post(s, a, addr, n);
|
|
|
|
|
|
|
|
if (loaded_base) {
|
2019-09-05 03:30:21 +08:00
|
|
|
/* Note that we reject base == pc above. */
|
2019-09-05 03:30:18 +08:00
|
|
|
store_reg(s, a->rn, loaded_var);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (exc_return) {
|
|
|
|
/* Restore CPSR from SPSR. */
|
|
|
|
tmp = load_cpu_field(spsr);
|
|
|
|
if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
|
|
|
|
gen_io_start();
|
|
|
|
}
|
|
|
|
gen_helper_cpsr_write_eret(cpu_env, tmp);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
/* Must exit loop to check un-masked IRQs */
|
|
|
|
s->base.is_jmp = DISAS_EXIT;
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDM_a32(DisasContext *s, arg_ldst_block *a)
|
|
|
|
{
|
2019-09-05 03:30:19 +08:00
|
|
|
/*
|
|
|
|
* Writeback register in register list is UNPREDICTABLE
|
|
|
|
* for ArchVersion() >= 7. Prior to v7, A32 would write
|
|
|
|
* an UNKNOWN value to the base register.
|
|
|
|
*/
|
|
|
|
if (ENABLE_ARCH_7 && a->w && (a->list & (1 << a->rn))) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:20 +08:00
|
|
|
/* BitCount(list) < 1 is UNPREDICTABLE */
|
|
|
|
return do_ldm(s, a, 1);
|
2019-09-05 03:30:18 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LDM_t32(DisasContext *s, arg_ldst_block *a)
|
|
|
|
{
|
|
|
|
/* Writeback register in register list is UNPREDICTABLE for T32. */
|
|
|
|
if (a->w && (a->list & (1 << a->rn))) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
2019-09-05 03:30:20 +08:00
|
|
|
/* BitCount(list) < 2 is UNPREDICTABLE */
|
|
|
|
return do_ldm(s, a, 2);
|
2019-09-05 03:30:18 +08:00
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:40 +08:00
|
|
|
static bool trans_LDM_t16(DisasContext *s, arg_ldst_block *a)
|
|
|
|
{
|
|
|
|
/* Writeback is conditional on the base register not being loaded. */
|
|
|
|
a->w = !(a->list & (1 << a->rn));
|
|
|
|
/* BitCount(list) < 1 is UNPREDICTABLE */
|
|
|
|
return do_ldm(s, a, 1);
|
|
|
|
}
|
|
|
|
|
2020-11-20 05:55:54 +08:00
|
|
|
static bool trans_CLRM(DisasContext *s, arg_CLRM *a)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
TCGv_i32 zero;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_m_sec_state, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (extract32(a->list, 13, 1)) {
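/* SP (bit 13) must not be present in the register list. */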
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!a->list) {
|
|
|
|
/* UNPREDICTABLE; we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
zero = tcg_const_i32(0);
|
|
|
|
for (i = 0; i < 15; i++) {
|
|
|
|
if (extract32(a->list, i, 1)) {
|
|
|
|
/* Clear R[i] */
|
|
|
|
tcg_gen_mov_i32(cpu_R[i], zero);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (extract32(a->list, 15, 1)) {
|
|
|
|
/*
|
|
|
|
* Clear APSR (by calling the MSR helper with the same argument
|
|
|
|
* as for "MSR APSR_nzcvqg, Rn": mask = 0b1100, SYSM=0)
|
|
|
|
*/
|
|
|
|
TCGv_i32 maskreg = tcg_const_i32(0xc << 8);
|
|
|
|
gen_helper_v7m_msr(cpu_env, maskreg, zero);
|
|
|
|
tcg_temp_free_i32(maskreg);
|
|
|
|
}
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:22 +08:00
|
|
|
/*
|
|
|
|
* Branch, branch with link
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_B(DisasContext *s, arg_i *a)
|
|
|
|
{
|
|
|
|
gen_jmp(s, read_pc(s) + a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_B_cond_thumb(DisasContext *s, arg_ci *a)
|
|
|
|
{
|
|
|
|
/* This has cond from encoding, required to be outside IT block. */
|
|
|
|
if (a->cond >= 0xe) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (s->condexec_mask) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
arm_skip_unless(s, a->cond);
|
|
|
|
gen_jmp(s, read_pc(s) + a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BL(DisasContext *s, arg_i *a)
|
|
|
|
{
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
|
|
|
|
gen_jmp(s, read_pc(s) + a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BLX_i(DisasContext *s, arg_BLX_i *a)
|
|
|
|
{
|
2019-09-05 03:30:59 +08:00
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
2020-10-19 23:12:56 +08:00
|
|
|
/*
|
|
|
|
* BLX <imm> would be useless on M-profile; the encoding space
|
|
|
|
* is used for other insns from v8.1M onward, and UNDEFs before that.
|
|
|
|
*/
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-08-03 19:18:49 +08:00
|
|
|
/* For A32, ARM_FEATURE_V5 is checked near the start of the uncond block. */
|
2019-09-05 03:30:22 +08:00
|
|
|
if (s->thumb && (a->imm & 2)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | s->thumb);
|
2019-09-05 03:30:59 +08:00
|
|
|
tmp = tcg_const_i32(!s->thumb);
|
|
|
|
store_cpu_field(tmp, thumb);
|
|
|
|
gen_jmp(s, (read_pc(s) & ~3) + a->imm);
|
2019-09-05 03:30:22 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:57 +08:00
|
|
|
static bool trans_BL_BLX_prefix(DisasContext *s, arg_BL_BLX_prefix *a)
|
|
|
|
{
|
|
|
|
assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], read_pc(s) + (a->imm << 12));
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BL_suffix(DisasContext *s, arg_BL_suffix *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
|
|
|
|
tcg_gen_addi_i32(tmp, cpu_R[14], (a->imm << 1) | 1);
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
|
|
|
|
gen_bx(s, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_BLX_suffix(DisasContext *s, arg_BLX_suffix *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
|
|
|
|
if (!ENABLE_ARCH_5) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
tcg_gen_addi_i32(tmp, cpu_R[14], a->imm << 1);
|
|
|
|
tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
|
|
|
|
tcg_gen_movi_i32(cpu_R[14], s->base.pc_next | 1);
|
|
|
|
gen_bx(s, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-19 23:12:57 +08:00
|
|
|
static bool trans_BF(DisasContext *s, arg_BF *a)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* M-profile branch future insns. The architecture permits an
|
|
|
|
* implementation to implement these as NOPs (equivalent to
|
|
|
|
* discarding the LO_BRANCH_INFO cache immediately), and we
|
|
|
|
* take that IMPDEF option because for QEMU a "real" implementation
|
|
|
|
* would be complicated and wouldn't execute any faster.
|
|
|
|
*/
|
|
|
|
if (!dc_isar_feature(aa32_lob, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->boff == 0) {
|
|
|
|
/* SEE "Related encodings" (loop insns) */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* Handle as NOP */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-19 23:12:58 +08:00
|
|
|
static bool trans_DLS(DisasContext *s, arg_DLS *a)
|
|
|
|
{
|
|
|
|
/* M-profile low-overhead loop start */
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_lob, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rn == 13 || a->rn == 15) {
|
|
|
|
/* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Not a while loop, no tail predication: just set LR to the count */
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
store_reg(s, 14, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_WLS(DisasContext *s, arg_WLS *a)
|
|
|
|
{
|
|
|
|
/* M-profile low-overhead while-loop start */
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
TCGLabel *nextlabel;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_lob, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rn == 13 || a->rn == 15) {
|
|
|
|
/* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (s->condexec_mask) {
|
|
|
|
/*
|
|
|
|
* WLS in an IT block is CONSTRAINED UNPREDICTABLE;
|
|
|
|
* we choose to UNDEF, because otherwise our use of
|
|
|
|
* gen_goto_tb(1) would clash with the use of TB exit 1
|
|
|
|
* in the dc->condjmp condition-failed codepath in
|
|
|
|
* arm_tr_tb_stop() and we'd get an assertion.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
nextlabel = gen_new_label();
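/* Skip the LR setup entirely when the iteration count in Rn is zero. */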
|
|
|
|
tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_R[a->rn], 0, nextlabel);
|
|
|
|
tmp = load_reg(s, a->rn);
|
|
|
|
store_reg(s, 14, tmp);
|
|
|
|
gen_jmp_tb(s, s->base.pc_next, 1);
|
|
|
|
|
|
|
|
gen_set_label(nextlabel);
|
|
|
|
gen_jmp(s, read_pc(s) + a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_LE(DisasContext *s, arg_LE *a)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* M-profile low-overhead loop end. The architecture permits an
|
|
|
|
* implementation to discard the LO_BRANCH_INFO cache at any time,
|
|
|
|
* and we take the IMPDEF option to never set it in the first place
|
|
|
|
* (equivalent to always discarding it immediately), because for QEMU
|
|
|
|
* a "real" implementation would be complicated and wouldn't execute
|
|
|
|
* any faster.
|
|
|
|
*/
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_lob, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!a->f) {
|
|
|
|
/* Not loop-forever. If LR <= 1 this is the last loop: do nothing. */
|
|
|
|
arm_gen_condlabel(s);
|
|
|
|
tcg_gen_brcondi_i32(TCG_COND_LEU, cpu_R[14], 1, s->condlabel);
|
|
|
|
/* Decrement LR */
|
|
|
|
tmp = load_reg(s, 14);
|
|
|
|
tcg_gen_addi_i32(tmp, tmp, -1);
|
|
|
|
store_reg(s, 14, tmp);
|
|
|
|
}
|
|
|
|
/* Jump back to the loop start */
|
|
|
|
gen_jmp(s, read_pc(s) - a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:30 +08:00
|
|
|
static bool op_tbranch(DisasContext *s, arg_tbranch *a, bool half)
|
|
|
|
{
|
|
|
|
TCGv_i32 addr, tmp;
|
|
|
|
|
|
|
|
tmp = load_reg(s, a->rm);
|
|
|
|
if (half) {
|
|
|
|
tcg_gen_add_i32(tmp, tmp, tmp);
|
|
|
|
}
|
|
|
|
addr = load_reg(s, a->rn);
|
|
|
|
tcg_gen_add_i32(addr, addr, tmp);
|
|
|
|
|
|
|
|
gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
|
|
|
|
half ? MO_UW | s->be_data : MO_UB);
|
|
|
|
tcg_temp_free_i32(addr);
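/* The loaded table entry is a branch offset in halfwords: double it and add it to the PC. */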
|
|
|
|
|
|
|
|
tcg_gen_add_i32(tmp, tmp, tmp);
|
|
|
|
tcg_gen_addi_i32(tmp, tmp, read_pc(s));
|
|
|
|
store_reg(s, 15, tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_TBB(DisasContext *s, arg_tbranch *a)
|
|
|
|
{
|
|
|
|
return op_tbranch(s, a, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_TBH(DisasContext *s, arg_tbranch *a)
|
|
|
|
{
|
|
|
|
return op_tbranch(s, a, true);
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:53 +08:00
|
|
|
static bool trans_CBZ(DisasContext *s, arg_CBZ *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 tmp = load_reg(s, a->rn);
|
|
|
|
|
|
|
|
arm_gen_condlabel(s);
|
|
|
|
tcg_gen_brcondi_i32(a->nz ? TCG_COND_EQ : TCG_COND_NE,
|
|
|
|
tmp, 0, s->condlabel);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
gen_jmp(s, read_pc(s) + a->imm);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:23 +08:00
|
|
|
/*
|
2019-09-19 21:18:40 +08:00
|
|
|
* Supervisor call - both T32 & A32 come here so we need to check
|
|
|
|
* which mode we are in when checking for semihosting.
|
2019-09-05 03:30:23 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_SVC(DisasContext *s, arg_SVC *a)
|
|
|
|
{
|
2019-09-19 21:18:40 +08:00
|
|
|
const uint32_t semihost_imm = s->thumb ? 0xab : 0x123456;
|
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M) && semihosting_enabled() &&
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
!IS_USER(s) &&
|
|
|
|
#endif
|
|
|
|
(a->imm == semihost_imm)) {
|
2019-12-17 23:08:57 +08:00
|
|
|
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
|
2019-09-19 21:18:40 +08:00
|
|
|
} else {
|
|
|
|
gen_set_pc_im(s, s->base.pc_next);
|
|
|
|
s->svc_imm = a->imm;
|
|
|
|
s->base.is_jmp = DISAS_SWI;
|
|
|
|
}
|
2019-09-05 03:30:23 +08:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:24 +08:00
|
|
|
/*
|
|
|
|
* Unconditional system instructions
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_RFE(DisasContext *s, arg_RFE *a)
|
|
|
|
{
|
|
|
|
static const int8_t pre_offset[4] = {
|
|
|
|
/* DA */ -4, /* IA */ 0, /* DB */ -8, /* IB */ 4
|
|
|
|
};
|
|
|
|
static const int8_t post_offset[4] = {
|
|
|
|
/* DA */ -8, /* IA */ 4, /* DB */ -4, /* IB */ 0
|
|
|
|
};
|
|
|
|
TCGv_i32 addr, t1, t2;
|
|
|
|
|
|
|
|
if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
addr = load_reg(s, a->rn);
|
|
|
|
tcg_gen_addi_i32(addr, addr, pre_offset[a->pu]);
|
|
|
|
|
|
|
|
/* Load PC into tmp and CPSR into tmp2. */
|
|
|
|
t1 = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld32u(s, t1, addr, get_mem_index(s));
|
|
|
|
tcg_gen_addi_i32(addr, addr, 4);
|
|
|
|
t2 = tcg_temp_new_i32();
|
|
|
|
gen_aa32_ld32u(s, t2, addr, get_mem_index(s));
|
|
|
|
|
|
|
|
if (a->w) {
|
|
|
|
/* Base writeback. */
|
|
|
|
tcg_gen_addi_i32(addr, addr, post_offset[a->pu]);
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
} else {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
|
|
|
gen_rfe(s, t1, t2);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SRS(DisasContext *s, arg_SRS *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
gen_srs(s, a->mode, a->pu, a->w);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:26 +08:00
|
|
|
static bool trans_CPS(DisasContext *s, arg_CPS *a)
|
|
|
|
{
|
|
|
|
uint32_t mask, val;
|
|
|
|
|
2019-09-05 03:30:47 +08:00
|
|
|
if (!ENABLE_ARCH_6 || arm_dc_feature(s, ARM_FEATURE_M)) {
|
2019-09-05 03:30:26 +08:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
/* Implemented as NOP in user mode. */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
/* TODO: There are quite a lot of UNPREDICTABLE argument combinations. */
|
|
|
|
|
|
|
|
mask = val = 0;
|
|
|
|
if (a->imod & 2) {
|
|
|
|
if (a->A) {
|
|
|
|
mask |= CPSR_A;
|
|
|
|
}
|
|
|
|
if (a->I) {
|
|
|
|
mask |= CPSR_I;
|
|
|
|
}
|
|
|
|
if (a->F) {
|
|
|
|
mask |= CPSR_F;
|
|
|
|
}
|
|
|
|
if (a->imod & 1) {
|
|
|
|
val |= mask;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (a->M) {
|
|
|
|
mask |= CPSR_M;
|
|
|
|
val |= a->mode;
|
|
|
|
}
|
|
|
|
if (mask) {
|
|
|
|
gen_set_psr_im(s, mask, 0, val);
|
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:47 +08:00
|
|
|
static bool trans_CPS_v7m(DisasContext *s, arg_CPS_v7m *a)
|
|
|
|
{
|
2020-03-04 01:49:48 +08:00
|
|
|
TCGv_i32 tmp, addr, el;
|
2019-09-05 03:30:47 +08:00
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (IS_USER(s)) {
|
|
|
|
/* Implemented as NOP in user mode. */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_const_i32(a->im);
|
|
|
|
/* FAULTMASK */
|
|
|
|
if (a->F) {
|
|
|
|
addr = tcg_const_i32(19);
|
|
|
|
gen_helper_v7m_msr(cpu_env, addr, tmp);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
|
|
|
/* PRIMASK */
|
|
|
|
if (a->I) {
|
|
|
|
addr = tcg_const_i32(16);
|
|
|
|
gen_helper_v7m_msr(cpu_env, addr, tmp);
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
2020-03-04 01:49:48 +08:00
|
|
|
el = tcg_const_i32(s->current_el);
|
|
|
|
gen_helper_rebuild_hflags_m32(cpu_env, el);
|
|
|
|
tcg_temp_free_i32(el);
|
2019-09-05 03:30:47 +08:00
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
gen_lookup_tb(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:25 +08:00
|
|
|
/*
|
|
|
|
* Clear-Exclusive, Barriers
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_CLREX(DisasContext *s, arg_CLREX *a)
|
|
|
|
{
|
|
|
|
if (s->thumb
|
|
|
|
? !ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)
|
|
|
|
: !ENABLE_ARCH_6K) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
gen_clrex(s);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_DSB(DisasContext *s, arg_DSB *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_DMB(DisasContext *s, arg_DMB *a)
|
|
|
|
{
|
|
|
|
return trans_DSB(s, NULL);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_ISB(DisasContext *s, arg_ISB *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_7 && !arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We need to break the TB after this insn to execute
|
|
|
|
* self-modifying code correctly and also to take
|
|
|
|
* any pending interrupts immediately.
|
|
|
|
*/
|
|
|
|
gen_goto_tb(s, 0, s->base.pc_next);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_SB(DisasContext *s, arg_SB *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_sb, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* TODO: There is no speculation barrier opcode
|
|
|
|
* for TCG; MB and end the TB instead.
|
|
|
|
*/
|
|
|
|
tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
|
|
|
|
gen_goto_tb(s, 0, s->base.pc_next);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:27 +08:00
|
|
|
static bool trans_SETEND(DisasContext *s, arg_SETEND *a)
|
|
|
|
{
|
|
|
|
if (!ENABLE_ARCH_6) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->E != (s->be_data == MO_BE)) {
|
|
|
|
gen_helper_setend(cpu_env);
|
2020-06-26 11:31:03 +08:00
|
|
|
s->base.is_jmp = DISAS_UPDATE_EXIT;
|
2019-09-05 03:30:27 +08:00
|
|
|
}
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:28 +08:00
|
|
|
/*
|
|
|
|
* Preload instructions
|
|
|
|
* All are nops, contingent on the appropriate arch level.
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_PLD(DisasContext *s, arg_PLD *a)
|
|
|
|
{
|
|
|
|
return ENABLE_ARCH_5TE;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_PLDW(DisasContext *s, arg_PLD *a)
|
|
|
|
{
|
|
|
|
return arm_dc_feature(s, ARM_FEATURE_V7MP);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_PLI(DisasContext *s, arg_PLD *a)
|
|
|
|
{
|
|
|
|
return ENABLE_ARCH_7;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:30:53 +08:00
|
|
|
/*
|
|
|
|
* If-then
|
|
|
|
*/
|
|
|
|
|
|
|
|
static bool trans_IT(DisasContext *s, arg_IT *a)
|
|
|
|
{
|
|
|
|
int cond_mask = a->cond_mask;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* No actual code generated for this insn, just setup state.
|
|
|
|
*
|
|
|
|
* Combinations of firstcond and mask which set up an 0b1111
|
|
|
|
* condition are UNPREDICTABLE; we take the CONSTRAINED
|
|
|
|
* UNPREDICTABLE choice to treat 0b1111 the same as 0b1110,
|
|
|
|
* i.e. both meaning "execute always".
|
|
|
|
*/
|
|
|
|
s->condexec_cond = (cond_mask >> 4) & 0xe;
|
|
|
|
s->condexec_mask = cond_mask & 0x1f;
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-10-19 23:12:54 +08:00
|
|
|
/* v8.1M CSEL/CSINC/CSNEG/CSINV */
|
|
|
|
static bool trans_CSEL(DisasContext *s, arg_CSEL *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 rn, rm, zero;
|
|
|
|
DisasCompare c;
|
|
|
|
|
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (a->rm == 13) {
|
|
|
|
/* SEE "Related encodings" (MVE shifts) */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (a->rd == 13 || a->rd == 15 || a->rn == 13 || a->fcond >= 14) {
|
|
|
|
/* CONSTRAINED UNPREDICTABLE: we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* In this insn input reg fields of 0b1111 mean "zero", not "PC" */
|
|
|
|
if (a->rn == 15) {
|
|
|
|
rn = tcg_const_i32(0);
|
|
|
|
} else {
|
|
|
|
rn = load_reg(s, a->rn);
|
|
|
|
}
|
|
|
|
if (a->rm == 15) {
|
|
|
|
rm = tcg_const_i32(0);
|
|
|
|
} else {
|
|
|
|
rm = load_reg(s, a->rm);
|
|
|
|
}
|
|
|
|
|
|
|
|
switch (a->op) {
|
|
|
|
case 0: /* CSEL */
|
|
|
|
break;
|
|
|
|
case 1: /* CSINC */
|
|
|
|
tcg_gen_addi_i32(rm, rm, 1);
|
|
|
|
break;
|
|
|
|
case 2: /* CSINV */
|
|
|
|
tcg_gen_not_i32(rm, rm);
|
|
|
|
break;
|
|
|
|
case 3: /* CSNEG */
|
|
|
|
tcg_gen_neg_i32(rm, rm);
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
g_assert_not_reached();
|
|
|
|
}
|
|
|
|
|
|
|
|
arm_test_cc(&c, a->fcond);
|
|
|
|
zero = tcg_const_i32(0);
|
|
|
|
tcg_gen_movcond_i32(c.cond, rn, c.value, zero, rn, rm);
|
|
|
|
arm_free_cc(&c);
|
|
|
|
tcg_temp_free_i32(zero);
|
|
|
|
|
|
|
|
store_reg(s, a->rd, rn);
|
|
|
|
tcg_temp_free_i32(rm);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-09-05 03:29:52 +08:00
|
|
|
/*
|
|
|
|
* Legacy decoder.
|
|
|
|
*/
|
|
|
|
|
2014-10-29 03:24:04 +08:00
|
|
|
static void disas_arm_insn(DisasContext *s, unsigned int insn)
|
2007-11-11 08:04:49 +08:00
|
|
|
{
|
2019-09-05 03:30:34 +08:00
|
|
|
unsigned int cond = insn >> 28;
|
2007-11-11 08:04:49 +08:00
|
|
|
|
2017-02-28 20:08:19 +08:00
|
|
|
/* M variants do not implement ARM mode; this must raise the INVSTATE
|
|
|
|
* UsageFault exception.
|
|
|
|
*/
|
2014-10-29 03:24:02 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
2019-08-15 16:46:44 +08:00
|
|
|
gen_exception_insn(s, s->pc_curr, EXCP_INVSTATE, syn_uncategorized(),
|
2017-02-28 20:08:19 +08:00
|
|
|
default_exception_el(s));
|
|
|
|
return;
|
2014-10-29 03:24:02 +08:00
|
|
|
}
|
2019-09-05 03:29:52 +08:00
|
|
|
|
|
|
|
if (cond == 0xf) {
|
2011-04-04 21:38:44 +08:00
|
|
|
/* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
|
|
|
|
* choose to UNDEF. In ARMv5 and above the space is used
|
|
|
|
* for miscellaneous unconditional instructions.
|
|
|
|
*/
|
2020-08-03 19:18:49 +08:00
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return;
|
|
|
|
}
|
2011-04-04 21:38:44 +08:00
|
|
|
|
2007-11-11 08:04:49 +08:00
|
|
|
/* Unconditional instructions. */
|
2020-02-25 06:22:27 +08:00
|
|
|
/* TODO: Perhaps merge these into one decodetree output file. */
|
|
|
|
if (disas_a32_uncond(s, insn) ||
|
2020-05-01 02:09:30 +08:00
|
|
|
disas_vfp_uncond(s, insn) ||
|
|
|
|
disas_neon_dp(s, insn) ||
|
|
|
|
disas_neon_ls(s, insn) ||
|
|
|
|
disas_neon_shared(s, insn)) {
|
2019-09-05 03:29:52 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* fall back to legacy decoder */
|
|
|
|
|
2019-09-05 03:30:27 +08:00
|
|
|
if ((insn & 0x0e000f00) == 0x0c000100) {
|
2014-10-29 03:24:01 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
|
2007-11-11 08:04:49 +08:00
|
|
|
/* iWMMXt register transfer. */
|
2014-09-30 01:48:48 +08:00
|
|
|
if (extract32(s->c15_cpar, 1, 1)) {
|
2014-10-29 03:24:03 +08:00
|
|
|
if (!disas_iwmmxt_insn(s, insn)) {
|
2007-11-11 08:04:49 +08:00
|
|
|
return;
|
2014-09-30 01:48:48 +08:00
|
|
|
}
|
|
|
|
}
|
2007-11-11 08:04:49 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
if (cond != 0xe) {
|
|
|
|
/* If the condition is not 'always execute', generate a conditional
|
|
|
|
jump to the next instruction. */
|
2018-08-20 18:24:31 +08:00
|
|
|
arm_skip_unless(s, cond);
|
2007-11-11 08:04:49 +08:00
|
|
|
}
|
2019-09-05 03:29:52 +08:00
|
|
|
|
2020-02-25 06:22:27 +08:00
|
|
|
/* TODO: Perhaps merge these into one decodetree output file. */
|
|
|
|
if (disas_a32(s, insn) ||
|
|
|
|
disas_vfp(s, insn)) {
|
2019-09-05 03:29:52 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
/* fall back to legacy decoder */
|
2020-08-03 19:18:46 +08:00
|
|
|
/* TODO: convert xscale/iwmmxt decoder to decodetree ?? */
|
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
|
|
|
|
if (((insn & 0x0c000e00) == 0x0c000000)
|
|
|
|
&& ((insn & 0x03000000) != 0x03000000)) {
|
|
|
|
/* Coprocessor insn, coprocessor 0 or 1 */
|
2020-08-03 19:18:43 +08:00
|
|
|
disas_xscale_insn(s, insn);
|
2020-08-03 19:18:46 +08:00
|
|
|
return;
|
2020-08-03 19:18:43 +08:00
|
|
|
}
|
2007-11-11 08:04:49 +08:00
|
|
|
}
|
2020-08-03 19:18:46 +08:00
|
|
|
|
|
|
|
illegal_op:
|
|
|
|
unallocated_encoding(s);
|
2007-11-11 08:04:49 +08:00
|
|
|
}
|
|
|
|
|
2019-08-15 16:46:42 +08:00
|
|
|
static bool thumb_insn_is_16bit(DisasContext *s, uint32_t pc, uint32_t insn)
|
2017-10-09 21:48:36 +08:00
|
|
|
{
|
2019-08-15 16:46:42 +08:00
|
|
|
/*
|
|
|
|
* Return true if this is a 16 bit instruction. We must be precise
|
|
|
|
* about this (matching the decode).
|
2017-10-09 21:48:36 +08:00
|
|
|
*/
|
|
|
|
if ((insn >> 11) < 0x1d) {
|
|
|
|
/* Definitely a 16-bit instruction */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
|
|
|
|
* first half of a 32-bit Thumb insn. Thumb-1 cores might
|
|
|
|
* end up actually treating this as two 16-bit insns, though,
|
|
|
|
* if it's half of a bl/blx pair that might span a page boundary.
|
|
|
|
*/
|
2018-06-15 21:57:16 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_THUMB2) ||
|
|
|
|
arm_dc_feature(s, ARM_FEATURE_M)) {
|
2017-10-09 21:48:36 +08:00
|
|
|
/* Thumb2 cores (including all M profile ones) always treat
|
|
|
|
* 32-bit insns as 32-bit.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-08-15 16:46:42 +08:00
|
|
|
if ((insn >> 11) == 0x1e && pc - s->page_start < TARGET_PAGE_SIZE - 3) {
|
2017-10-09 21:48:36 +08:00
|
|
|
/* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
|
|
|
|
* is not on the next page; we merge this into a 32-bit
|
|
|
|
* insn.
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
|
|
|
|
* 0b1111_1xxx_xxxx_xxxx : BL suffix;
|
|
|
|
* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
|
|
|
|
* -- handle as single 16 bit insn
|
|
|
|
*/
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-01-11 21:25:40 +08:00
|
|
|
/* Translate a 32-bit thumb instruction. */
|
|
|
|
static void disas_thumb2_insn(DisasContext *s, uint32_t insn)
|
2007-11-11 08:04:49 +08:00
|
|
|
{
|
2018-06-15 21:57:16 +08:00
|
|
|
/*
|
|
|
|
* ARMv6-M supports a limited subset of Thumb2 instructions.
|
|
|
|
* Other Thumb1 architectures allow only 32-bit
|
|
|
|
* combined BL/BLX prefix and suffix.
|
2017-10-09 21:48:36 +08:00
|
|
|
*/
|
2018-06-15 21:57:16 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M) &&
|
|
|
|
!arm_dc_feature(s, ARM_FEATURE_V7)) {
|
|
|
|
int i;
|
|
|
|
bool found = false;
|
2018-06-22 20:28:34 +08:00
|
|
|
static const uint32_t armv6m_insn[] = {0xf3808000 /* msr */,
|
|
|
|
0xf3b08040 /* dsb */,
|
|
|
|
0xf3b08050 /* dmb */,
|
|
|
|
0xf3b08060 /* isb */,
|
|
|
|
0xf3e08000 /* mrs */,
|
|
|
|
0xf000d000 /* bl */};
|
|
|
|
static const uint32_t armv6m_mask[] = {0xffe0d000,
|
|
|
|
0xfff0d0f0,
|
|
|
|
0xfff0d0f0,
|
|
|
|
0xfff0d0f0,
|
|
|
|
0xffe0d000,
|
|
|
|
0xf800d000};
|
2018-06-15 21:57:16 +08:00
|
|
|
|
|
|
|
for (i = 0; i < ARRAY_SIZE(armv6m_insn); i++) {
|
|
|
|
if ((insn & armv6m_mask[i]) == armv6m_insn[i]) {
|
|
|
|
found = true;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if (!found) {
|
|
|
|
goto illegal_op;
|
|
|
|
}
|
|
|
|
} else if ((insn & 0xf800e800) != 0xf000e800) {
|
2020-08-03 19:18:49 +08:00
|
|
|
if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
|
|
|
|
unallocated_encoding(s);
|
|
|
|
return;
|
|
|
|
}
|
2007-11-11 08:04:49 +08:00
|
|
|
}
|
|
|
|
|
2020-08-03 19:18:47 +08:00
|
|
|
if (arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
|
/*
|
|
|
|
* NOCP takes precedence over any UNDEF for (almost) the
|
|
|
|
* entire wide range of coprocessor-space encodings, so check
|
|
|
|
* for it first before proceeding to actually decode eg VFP
|
|
|
|
* insns. This decode also handles the few insns which are
|
|
|
|
* in copro space but do not have NOCP checks (eg VLLDM, VLSTM).
|
|
|
|
*/
|
|
|
|
if (disas_m_nocp(s, insn)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-05-01 02:09:30 +08:00
|
|
|
if ((insn & 0xef000000) == 0xef000000) {
|
|
|
|
/*
|
|
|
|
* T32 encodings 0b111p_1111_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
|
|
|
|
* transform into
|
|
|
|
* A32 encodings 0b1111_001p_qqqq_qqqq_qqqq_qqqq_qqqq_qqqq
|
|
|
|
*/
|
|
|
|
uint32_t a32_insn = (insn & 0xe2ffffff) |
|
|
|
|
((insn & (1 << 28)) >> 4) | (1 << 28);
|
|
|
|
|
|
|
|
if (disas_neon_dp(s, a32_insn)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ((insn & 0xff100000) == 0xf9000000) {
|
|
|
|
/*
|
|
|
|
* T32 encodings 0b1111_1001_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
|
|
|
|
* transform into
|
|
|
|
* A32 encodings 0b1111_0100_ppp0_qqqq_qqqq_qqqq_qqqq_qqqq
|
|
|
|
*/
|
|
|
|
uint32_t a32_insn = (insn & 0x00ffffff) | 0xf4000000;
|
|
|
|
|
|
|
|
if (disas_neon_ls(s, a32_insn)) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}

    /*
     * TODO: Perhaps merge these into one decodetree output file.
     * Note disas_vfp is written for a32 with cond field in the
     * top nibble. The t32 encoding requires 0xe in the top nibble.
     */
    if (disas_t32(s, insn) ||
        disas_vfp_uncond(s, insn) ||
        disas_neon_shared(s, insn) ||
        ((insn >> 28) == 0xe && disas_vfp(s, insn))) {
        return;
    }

illegal_op:
    unallocated_encoding(s);
}

static void disas_thumb_insn(DisasContext *s, uint32_t insn)
{
    if (!disas_t16(s, insn)) {
        unallocated_encoding(s);
    }
}

static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
{
    /* Return true if the insn at dc->base.pc_next might cross a page boundary.
     * (False positives are OK, false negatives are not.)
     * We know this is a Thumb insn, and our caller ensures we are
     * only called if dc->base.pc_next is less than 4 bytes from the page
     * boundary, so we cross the page if the first 16 bits indicate
     * that this is a 32 bit insn.
     */
    uint16_t insn = arm_lduw_code(env, s->base.pc_next, s->sctlr_b);

    return !thumb_insn_is_16bit(s, s->base.pc_next, insn);
}
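
/*
 * Illustration for insn_crosses_page(), assuming 4 KiB pages: it is only
 * reached when pc_next sits on the last halfword of the page (offset 0xffe),
 * where a 16-bit insn still fits but a 32-bit insn would continue on the
 * next page.
 */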

static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cs->env_ptr;
    ARMCPU *cpu = env_archcpu(env);
    uint32_t tb_flags = dc->base.tb->flags;
    uint32_t condexec, core_mmu_idx;

    dc->isar = &cpu->isar;
    dc->condjmp = 0;

    dc->aarch64 = 0;
    /* If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3.
     */
    dc->secure_routed_to_el3 = arm_feature(env, ARM_FEATURE_EL3) &&
                               !arm_el_is_aa64(env, 3);
    dc->thumb = FIELD_EX32(tb_flags, TBFLAG_AM32, THUMB);
    dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
    condexec = FIELD_EX32(tb_flags, TBFLAG_AM32, CONDEXEC);
    dc->condexec_mask = (condexec & 0xf) << 1;
    dc->condexec_cond = condexec >> 4;
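    /*
     * Roughly: the high nibble of the saved IT state is the condition to
     * apply to the next insn in the block, and the low nibble is the mask,
     * held here shifted left by one (a 5-bit value) for the per-insn
     * advance in thumb_tr_translate_insn().
     */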

    core_mmu_idx = FIELD_EX32(tb_flags, TBFLAG_ANY, MMUIDX);
    dc->mmu_idx = core_to_arm_mmu_idx(env, core_mmu_idx);
    dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
    dc->user = (dc->current_el == 0);
#endif
    dc->fp_excp_el = FIELD_EX32(tb_flags, TBFLAG_ANY, FPEXC_EL);

    if (arm_feature(env, ARM_FEATURE_M)) {
        dc->vfp_enabled = 1;
        dc->be_data = MO_TE;
        dc->v7m_handler_mode = FIELD_EX32(tb_flags, TBFLAG_M32, HANDLER);
        dc->v8m_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
            regime_is_secure(env, dc->mmu_idx);
        dc->v8m_stackcheck = FIELD_EX32(tb_flags, TBFLAG_M32, STACKCHECK);
        dc->v8m_fpccr_s_wrong =
            FIELD_EX32(tb_flags, TBFLAG_M32, FPCCR_S_WRONG);
        dc->v7m_new_fp_ctxt_needed =
            FIELD_EX32(tb_flags, TBFLAG_M32, NEW_FP_CTXT_NEEDED);
        dc->v7m_lspact = FIELD_EX32(tb_flags, TBFLAG_M32, LSPACT);
    } else {
        dc->be_data =
            FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
        dc->debug_target_el =
            FIELD_EX32(tb_flags, TBFLAG_ANY, DEBUG_TARGET_EL);
        dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
        dc->ns = FIELD_EX32(tb_flags, TBFLAG_A32, NS);
        dc->vfp_enabled = FIELD_EX32(tb_flags, TBFLAG_A32, VFPEN);
        if (arm_feature(env, ARM_FEATURE_XSCALE)) {
            dc->c15_cpar = FIELD_EX32(tb_flags, TBFLAG_A32, XSCALE_CPAR);
        } else {
            dc->vec_len = FIELD_EX32(tb_flags, TBFLAG_A32, VECLEN);
            dc->vec_stride = FIELD_EX32(tb_flags, TBFLAG_A32, VECSTRIDE);
        }
    }
    dc->cp_regs = cpu->cp_regs;
    dc->features = env->features;

    /* Single step state. The code-generation logic here is:
     *  SS_ACTIVE == 0:
     *   generate code with no special handling for single-stepping (except
     *   that anything that can make us go to SS_ACTIVE == 1 must end the TB;
     *   this happens anyway because those changes are all system register or
     *   PSTATE writes).
     *  SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
     *   emit code for one insn
     *   emit code to clear PSTATE.SS
     *   emit code to generate software step exception for completed step
     *   end TB (as usual for having generated an exception)
     *  SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
     *   emit code to generate a software step exception
     *   end the TB
     */
    dc->ss_active = FIELD_EX32(tb_flags, TBFLAG_ANY, SS_ACTIVE);
    dc->pstate_ss = FIELD_EX32(tb_flags, TBFLAG_ANY, PSTATE_SS);
    dc->is_ldex = false;

    dc->page_start = dc->base.pc_first & TARGET_PAGE_MASK;

    /* If architectural single step active, limit to 1. */
    if (is_singlestepping(dc)) {
        dc->base.max_insns = 1;
    }

    /* ARM is a fixed-length ISA.  Bound the number of insns to execute
       to those left on the page. */
    if (!dc->thumb) {
        int bound = -(dc->base.pc_first | TARGET_PAGE_MASK) / 4;
        dc->base.max_insns = MIN(dc->base.max_insns, bound);
    }
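    /*
     * Worked example for the bound above, assuming 4 KiB pages: with
     * pc_first at page offset 0xff8, (pc_first | TARGET_PAGE_MASK) is -8
     * when viewed as a signed value, so bound = 8 / 4 = 2 A32 insns left
     * on the page.
     */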

    cpu_V0 = tcg_temp_new_i64();
    cpu_V1 = tcg_temp_new_i64();
    cpu_M0 = tcg_temp_new_i64();
}

static void arm_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    /* A note on handling of the condexec (IT) bits:
     *
     * We want to avoid the overhead of having to write the updated condexec
     * bits back to the CPUARMState for every instruction in an IT block. So:
     * (1) if the condexec bits are not already zero then we write
     * zero back into the CPUARMState now. This avoids complications trying
     * to do it at the end of the block. (For example if we don't do this
     * it's hard to identify whether we can safely skip writing condexec
     * at the end of the TB, which we definitely want to do for the case
     * where a TB doesn't do anything with the IT state at all.)
     * (2) if we are going to leave the TB then we call gen_set_condexec()
     * which will write the correct value into CPUARMState if zero is wrong.
     * This is done both for leaving the TB at the end, and for leaving
     * it because of an exception we know will happen, which is done in
     * gen_exception_insn(). The latter is necessary because we need to
     * leave the TB with the PC/IT state just prior to execution of the
     * instruction which caused the exception.
     * (3) if we leave the TB unexpectedly (eg a data abort on a load)
     * then the CPUARMState will be wrong and we need to reset it.
     * This is handled in the same way as restoration of the
     * PC in these situations; we save the value of the condexec bits
     * for each PC via tcg_gen_insn_start(), and restore_state_to_opc()
     * then uses this to restore them after an exception.
     *
     * Note that there are no instructions which can read the condexec
     * bits, and none which can write non-static values to them, so
     * we don't need to care about whether CPUARMState is correct in the
     * middle of a TB.
     */

    /* Reset the conditional execution bits immediately. This avoids
       complications trying to do it at the end of the block. */
    if (dc->condexec_mask || dc->condexec_cond) {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_movi_i32(tmp, 0);
        store_cpu_field(tmp, condexec_bits);
    }
}

static void arm_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(dc->base.pc_next,
                       (dc->condexec_cond << 4) | (dc->condexec_mask >> 1),
                       0);
    dc->insn_start = tcg_last_op();
}
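
/*
 * The three values recorded by arm_tr_insn_start() become the insn-start
 * data words: data[0] is the PC, data[1] the packed IT bits and data[2]
 * the syndrome slot (written as 0 here), which restore_state_to_opc()
 * at the end of this file uses to resynchronise the CPU state after an
 * unexpected TB exit.
 */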

static bool arm_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu,
                                    const CPUBreakpoint *bp)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (bp->flags & BP_CPU) {
        gen_set_condexec(dc);
        gen_set_pc_im(dc, dc->base.pc_next);
        gen_helper_check_breakpoints(cpu_env);
        /* End the TB early; it's likely not going to be executed */
        dc->base.is_jmp = DISAS_TOO_MANY;
    } else {
        gen_exception_internal_insn(dc, dc->base.pc_next, EXCP_DEBUG);
        /* The address covered by the breakpoint must be
           included in [tb->pc, tb->pc + tb->size) in order
           for it to be properly cleared -- thus we
           increment the PC here so that the logic setting
           tb->size below does the right thing. */
        /* TODO: Advance PC by correct instruction length to
         * avoid disassembler error messages */
        dc->base.pc_next += 2;
        dc->base.is_jmp = DISAS_NORETURN;
    }

    return true;
}

static bool arm_pre_translate_insn(DisasContext *dc)
{
#ifdef CONFIG_USER_ONLY
    /* Intercept jump to the magic kernel page. */
    if (dc->base.pc_next >= 0xffff0000) {
        /* We always get here via a jump, so we know we are not in a
           conditional execution block. */
        gen_exception_internal(EXCP_KERNEL_TRAP);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }
#endif

    if (dc->ss_active && !dc->pstate_ss) {
        /* Singlestep state is Active-pending.
         * If we're in this state at the start of a TB then either
         *  a) we just took an exception to an EL which is being debugged
         *     and this is the first insn in the exception handler
         *  b) debug exceptions were masked and we just unmasked them
         *     without changing EL (eg by clearing PSTATE.D)
         * In either case we're going to take a swstep exception in the
         * "did not step an insn" case, and so the syndrome ISV and EX
         * bits should be zero.
         */
        assert(dc->base.num_insns == 1);
        gen_swstep_exception(dc, 0, 0);
        dc->base.is_jmp = DISAS_NORETURN;
        return true;
    }

    return false;
}

static void arm_post_translate_insn(DisasContext *dc)
{
    if (dc->condjmp && !dc->base.is_jmp) {
        gen_set_label(dc->condlabel);
        dc->condjmp = 0;
    }
    translator_loop_temp_check(&dc->base);
}

static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    unsigned int insn;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_ldl_code(env, dc->base.pc_next, dc->sctlr_b);
    dc->insn = insn;
    dc->base.pc_next += 4;
    disas_arm_insn(dc, insn);

    arm_post_translate_insn(dc);

    /* ARM is a fixed-length ISA.  We performed the cross-page check
       in init_disas_context by adjusting max_insns. */
}

static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
{
    /* Return true if this Thumb insn is always unconditional,
     * even inside an IT block. This is true of only a very few
     * instructions: BKPT, HLT, and SG.
     *
     * A larger class of instructions are UNPREDICTABLE if used
     * inside an IT block; we do not need to detect those here, because
     * what we do by default (perform the cc check and update the IT
     * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
     * choice for those situations.
     *
     * insn is either a 16-bit or a 32-bit instruction; the two are
     * distinguishable because for the 16-bit case the top 16 bits
     * are zeroes, and that isn't a valid 32-bit encoding.
     */
    if ((insn & 0xffffff00) == 0xbe00) {
        /* BKPT */
        return true;
    }

    if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
        !arm_dc_feature(s, ARM_FEATURE_M)) {
        /* HLT: v8A only. This is unconditional even when it is going to
         * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
         * For v7 cores this was a plain old undefined encoding and so
         * honours its cc check. (We might be using the encoding as
         * a semihosting trap, but we don't change the cc check behaviour
         * on that account, because a debugger connected to a real v7A
         * core and emulating semihosting traps by catching the UNDEF
         * exception would also only see cases where the cc check passed.
         * No guest code should be trying to do a HLT semihosting trap
         * in an IT block anyway.
         */
        return true;
    }

    if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
        arm_dc_feature(s, ARM_FEATURE_M)) {
        /* SG: v8M only */
        return true;
    }

    return false;
}
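
/*
 * For reference, the three patterns tested above correspond to
 * BKPT #imm8 (0xbe00..0xbeff), HLT #imm6 (0xba80..0xbabf) and the fixed
 * SG encoding 0xe97fe97f.
 */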

static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUARMState *env = cpu->env_ptr;
    uint32_t insn;
    bool is_16bit;

    if (arm_pre_translate_insn(dc)) {
        return;
    }

    dc->pc_curr = dc->base.pc_next;
    insn = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);
    is_16bit = thumb_insn_is_16bit(dc, dc->base.pc_next, insn);
    dc->base.pc_next += 2;
    if (!is_16bit) {
        uint32_t insn2 = arm_lduw_code(env, dc->base.pc_next, dc->sctlr_b);

        insn = insn << 16 | insn2;
        dc->base.pc_next += 2;
    }
    dc->insn = insn;

    if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
        uint32_t cond = dc->condexec_cond;

        /*
         * Conditionally skip the insn. Note that both 0xe and 0xf mean
         * "always"; 0xf is not "never".
         */
        if (cond < 0x0e) {
            arm_skip_unless(dc, cond);
        }
    }

    if (is_16bit) {
        disas_thumb_insn(dc, insn);
    } else {
        disas_thumb2_insn(dc, insn);
    }

    /* Advance the Thumb condexec condition. */
    if (dc->condexec_mask) {
        dc->condexec_cond = ((dc->condexec_cond & 0xe) |
                             ((dc->condexec_mask >> 4) & 1));
        dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
        if (dc->condexec_mask == 0) {
            dc->condexec_cond = 0;
        }
    }
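    /*
     * Illustration of the advance above, from an arbitrary starting state:
     * with condexec_cond 0x0 and condexec_mask 0b01000, one advance yields
     * (0x0, 0b10000); the next yields cond 0x1 with an empty mask, at which
     * point the cond is cleared because the IT block has ended.
     */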

    arm_post_translate_insn(dc);

    /* Thumb is a variable-length ISA.  Stop translation when the next insn
     * will touch a new page.  This ensures that prefetch aborts occur at
     * the right place.
     *
     * We want to stop the TB if the next insn starts in a new page,
     * or if it spans between this page and the next. This means that
     * if we're looking at the last halfword in the page we need to
     * see if it's a 16-bit Thumb insn (which will fit in this TB)
     * or a 32-bit Thumb insn (which won't).
     * This is to avoid generating a silly TB with a single 16-bit insn
     * in it at the end of this page (which would execute correctly
     * but isn't very efficient).
     */
    if (dc->base.is_jmp == DISAS_NEXT
        && (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE
            || (dc->base.pc_next - dc->page_start >= TARGET_PAGE_SIZE - 3
                && insn_crosses_page(env, dc)))) {
        dc->base.is_jmp = DISAS_TOO_MANY;
    }
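    /*
     * Illustration of the check above, assuming 4 KiB pages: if pc_next
     * now sits at offset 0x1000 the next insn starts on a new page, and
     * if it sits at offset 0xffe the TB is ended only when
     * insn_crosses_page() reports that the next insn is a 32-bit one.
     */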
}

static void arm_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
        /* FIXME: This can theoretically happen with self-modifying code. */
        cpu_abort(cpu, "IO on conditional branch instruction");
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    gen_set_condexec(dc);
    if (dc->base.is_jmp == DISAS_BX_EXCRET) {
        /* Exception return branches need some special case code at the
         * end of the TB, which is complex enough that it has to
         * handle the single-step vs not and the condition-failed
         * insn codepath itself.
         */
        gen_bx_excret_final_code(dc);
    } else if (unlikely(is_singlestepping(dc))) {
        /* Unconditional and "condition passed" instruction codepath. */
        switch (dc->base.is_jmp) {
        case DISAS_SWI:
            gen_ss_advance(dc);
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_ss_advance(dc);
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_ss_advance(dc);
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
        case DISAS_UPDATE_EXIT:
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* FIXME: Single stepping a WFI insn will not halt the CPU. */
            gen_singlestep_exception(dc);
            break;
        case DISAS_NORETURN:
            break;
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->base.is_jmp) {
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            gen_goto_tb(dc, 1, dc->base.pc_next);
            break;
        case DISAS_UPDATE_NOCHAIN:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        case DISAS_JUMP:
            gen_goto_ptr();
            break;
        case DISAS_UPDATE_EXIT:
            gen_set_pc_im(dc, dc->base.pc_next);
            /* fall through */
        default:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
        {
            TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
                                          !(dc->insn & (1U << 31))) ? 2 : 4);

            gen_helper_wfi(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            /* The helper doesn't necessarily throw an exception, but we
             * must go back to the main loop to check for interrupts anyway.
             */
            tcg_gen_exit_tb(NULL, 0);
            break;
        }
        case DISAS_WFE:
            gen_helper_wfe(cpu_env);
            break;
        case DISAS_YIELD:
            gen_helper_yield(cpu_env);
            break;
        case DISAS_SWI:
            gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
                          default_exception_el(dc));
            break;
        case DISAS_HVC:
            gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
            break;
        case DISAS_SMC:
            gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
            break;
        }
    }

    if (dc->condjmp) {
        /* "Condition failed" instruction codepath for the branch/trap insn */
        gen_set_label(dc->condlabel);
        gen_set_condexec(dc);
        if (unlikely(is_singlestepping(dc))) {
            gen_set_pc_im(dc, dc->base.pc_next);
            gen_singlestep_exception(dc);
        } else {
            gen_goto_tb(dc, 1, dc->base.pc_next);
        }
    }
}

static void arm_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
    log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}

static const TranslatorOps arm_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = arm_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};

static const TranslatorOps thumb_translator_ops = {
    .init_disas_context = arm_tr_init_disas_context,
    .tb_start           = arm_tr_tb_start,
    .insn_start         = arm_tr_insn_start,
    .breakpoint_check   = arm_tr_breakpoint_check,
    .translate_insn     = thumb_tr_translate_insn,
    .tb_stop            = arm_tr_tb_stop,
    .disas_log          = arm_tr_disas_log,
};
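
/*
 * The two tables differ only in translate_insn; gen_intermediate_code()
 * below picks thumb_translator_ops when the TB's THUMB flag is set, and
 * may switch to aarch64_translator_ops when the TB was created in
 * AArch64 state.
 */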

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns)
{
    DisasContext dc = { };
    const TranslatorOps *ops = &arm_translator_ops;

    if (FIELD_EX32(tb->flags, TBFLAG_AM32, THUMB)) {
        ops = &thumb_translator_ops;
    }
#ifdef TARGET_AARCH64
    if (FIELD_EX32(tb->flags, TBFLAG_ANY, AARCH64_STATE)) {
        ops = &aarch64_translator_ops;
    }
#endif

    translator_loop(ops, &dc.base, cpu, tb, max_insns);
}

void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    if (is_a64(env)) {
        env->pc = data[0];
        env->condexec_bits = 0;
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    } else {
        env->regs[15] = data[0];
        env->condexec_bits = data[1];
        env->exception.syndrome = data[2] << ARM_INSN_START_WORD2_SHIFT;
    }
}