tcg/arm: Unset TCG_TARGET_HAS_MEMORY_BSWAP
Now that the middle-end can replicate the same tricks as tcg/arm used for optimizing bswap for signed loads and for stores, do not pretend to have these memory ops in the backend.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 843b82424f
parent 92ecfab50e
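For context, the expansion the middle-end performs once a backend reports TCG_TARGET_HAS_MEMORY_BSWAP as 0 looks roughly like the sketch below. This is a simplified illustration, not the exact tcg/tcg-op.c code, and emit_plain_qemu_ld_i32() is a hypothetical placeholder for emitting the qemu_ld opcode without MO_BSWAP. The trick named in the commit message shows up in the MO_16 case: a byte-swapped signed load becomes a zero-extending load whose sign-extension is folded into the swap via TCG_BSWAP_IZ | TCG_BSWAP_OS; stores (not shown) simply swap into a temporary before a plain store.

/* Simplified sketch, to be read against tcg/tcg-op.c; emit_plain_qemu_ld_i32()
 * is a hypothetical stand-in for emitting INDEX_op_qemu_ld_i32 with MO_BSWAP
 * already stripped from the MemOp.
 */
static void gen_bswapped_ld_i32_sketch(TCGv_i32 val, TCGv addr,
                                       TCGArg idx, MemOp memop)
{
    MemOp orig_memop = memop;

    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
        memop &= ~MO_BSWAP;
        /* The bswap primitive benefits from zero-extended input, so drop
           the sign-extension from the 16-bit load itself. */
        if ((memop & MO_SSIZE) == MO_SW) {
            memop &= ~MO_SIGN;
        }
    }

    emit_plain_qemu_ld_i32(val, addr, idx, memop);  /* hypothetical helper */

    if ((orig_memop ^ memop) & MO_BSWAP) {
        switch (orig_memop & MO_SIZE) {
        case MO_16:
            /* Input is zero-extended; sign- or zero-extend the output as
               part of the swap, replacing the backend's old special case. */
            tcg_gen_bswap16_i32(val, val,
                                (orig_memop & MO_SIGN)
                                ? TCG_BSWAP_IZ | TCG_BSWAP_OS
                                : TCG_BSWAP_IZ | TCG_BSWAP_OZ);
            break;
        case MO_32:
            tcg_gen_bswap32_i32(val, val);
            break;
        default:
            g_assert_not_reached();
        }
    }
}

Because the middle-end now strips MO_BSWAP before the opcode reaches the backend, the new tcg_debug_assert((opc & MO_BSWAP) == 0) checks added in the hunks below can never fire.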
tcg/arm/tcg-target.c.inc
@@ -1393,34 +1393,38 @@ static void tcg_out_vldst(TCGContext *s, ARMInsn insn,
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[16] = {
+static void * const qemu_ld_helpers[8] = {
     [MO_UB] = helper_ret_ldub_mmu,
     [MO_SB] = helper_ret_ldsb_mmu,
-
-    [MO_LEUW] = helper_le_lduw_mmu,
-    [MO_LEUL] = helper_le_ldul_mmu,
-    [MO_LEQ]  = helper_le_ldq_mmu,
-    [MO_LESW] = helper_le_ldsw_mmu,
-    [MO_LESL] = helper_le_ldul_mmu,
-
-    [MO_BEUW] = helper_be_lduw_mmu,
-    [MO_BEUL] = helper_be_ldul_mmu,
-    [MO_BEQ]  = helper_be_ldq_mmu,
-    [MO_BESW] = helper_be_ldsw_mmu,
-    [MO_BESL] = helper_be_ldul_mmu,
+#ifdef HOST_WORDS_BIGENDIAN
+    [MO_UW] = helper_be_lduw_mmu,
+    [MO_UL] = helper_be_ldul_mmu,
+    [MO_Q]  = helper_be_ldq_mmu,
+    [MO_SW] = helper_be_ldsw_mmu,
+    [MO_SL] = helper_be_ldul_mmu,
+#else
+    [MO_UW] = helper_le_lduw_mmu,
+    [MO_UL] = helper_le_ldul_mmu,
+    [MO_Q]  = helper_le_ldq_mmu,
+    [MO_SW] = helper_le_ldsw_mmu,
+    [MO_SL] = helper_le_ldul_mmu,
+#endif
 };
 
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
  */
-static void * const qemu_st_helpers[16] = {
-    [MO_UB]   = helper_ret_stb_mmu,
-    [MO_LEUW] = helper_le_stw_mmu,
-    [MO_LEUL] = helper_le_stl_mmu,
-    [MO_LEQ]  = helper_le_stq_mmu,
-    [MO_BEUW] = helper_be_stw_mmu,
-    [MO_BEUL] = helper_be_stl_mmu,
-    [MO_BEQ]  = helper_be_stq_mmu,
+static void * const qemu_st_helpers[4] = {
+    [MO_8] = helper_ret_stb_mmu,
+#ifdef HOST_WORDS_BIGENDIAN
+    [MO_16] = helper_be_stw_mmu,
+    [MO_32] = helper_be_stl_mmu,
+    [MO_64] = helper_be_stq_mmu,
+#else
+    [MO_16] = helper_le_stw_mmu,
+    [MO_32] = helper_le_stl_mmu,
+    [MO_64] = helper_le_stq_mmu,
+#endif
 };
 
 /* Helper routines for marshalling helper function arguments into
@@ -1625,9 +1629,9 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
        icache usage. For pre-armv6, use the signed helpers since we do
        not have a single insn sign-extend. */
     if (use_armv6_instructions) {
-        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)];
+        func = qemu_ld_helpers[opc & MO_SIZE];
     } else {
-        func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)];
+        func = qemu_ld_helpers[opc & MO_SSIZE];
         if (opc & MO_SIGN) {
             opc = MO_UL;
         }
@@ -1705,7 +1709,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14);
 
     /* Tail-call to the helper, which will return to the fast path. */
-    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & MO_SIZE]);
     return true;
 }
 #endif /* SOFTMMU */
@@ -1714,7 +1718,8 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo, TCGReg addend)
 {
-    MemOp bswap = opc & MO_BSWAP;
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SSIZE) {
     case MO_UB:
@@ -1725,51 +1730,30 @@ static inline void tcg_out_qemu_ld_index(TCGContext *s, MemOp opc,
         break;
     case MO_UW:
         tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
-        if (bswap) {
-            tcg_out_bswap16(s, COND_AL, datalo, datalo,
-                            TCG_BSWAP_IZ | TCG_BSWAP_OZ);
-        }
         break;
     case MO_SW:
-        if (bswap) {
-            tcg_out_ld16u_r(s, COND_AL, datalo, addrlo, addend);
-            tcg_out_bswap16(s, COND_AL, datalo, datalo,
-                            TCG_BSWAP_IZ | TCG_BSWAP_OS);
-        } else {
-            tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
-        }
+        tcg_out_ld16s_r(s, COND_AL, datalo, addrlo, addend);
         break;
     case MO_UL:
-    default:
         tcg_out_ld32_r(s, COND_AL, datalo, addrlo, addend);
-        if (bswap) {
-            tcg_out_bswap32(s, COND_AL, datalo, datalo);
-        }
         break;
     case MO_Q:
-        {
-            TCGReg dl = (bswap ? datahi : datalo);
-            TCGReg dh = (bswap ? datalo : datahi);
-
-            /* Avoid ldrd for user-only emulation, to handle unaligned. */
-            if (USING_SOFTMMU && use_armv6_instructions
-                && (dl & 1) == 0 && dh == dl + 1) {
-                tcg_out_ldrd_r(s, COND_AL, dl, addrlo, addend);
-            } else if (dl != addend) {
-                tcg_out_ld32_rwb(s, COND_AL, dl, addend, addrlo);
-                tcg_out_ld32_12(s, COND_AL, dh, addend, 4);
-            } else {
-                tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
-                                addend, addrlo, SHIFT_IMM_LSL(0));
-                tcg_out_ld32_12(s, COND_AL, dl, TCG_REG_TMP, 0);
-                tcg_out_ld32_12(s, COND_AL, dh, TCG_REG_TMP, 4);
-            }
-            if (bswap) {
-                tcg_out_bswap32(s, COND_AL, dl, dl);
-                tcg_out_bswap32(s, COND_AL, dh, dh);
-            }
-        }
+        /* Avoid ldrd for user-only emulation, to handle unaligned. */
+        if (USING_SOFTMMU && use_armv6_instructions
+            && (datalo & 1) == 0 && datahi == datalo + 1) {
+            tcg_out_ldrd_r(s, COND_AL, datalo, addrlo, addend);
+        } else if (datalo != addend) {
+            tcg_out_ld32_rwb(s, COND_AL, datalo, addend, addrlo);
+            tcg_out_ld32_12(s, COND_AL, datahi, addend, 4);
+        } else {
+            tcg_out_dat_reg(s, COND_AL, ARITH_ADD, TCG_REG_TMP,
+                            addend, addrlo, SHIFT_IMM_LSL(0));
+            tcg_out_ld32_12(s, COND_AL, datalo, TCG_REG_TMP, 0);
+            tcg_out_ld32_12(s, COND_AL, datahi, TCG_REG_TMP, 4);
+        }
         break;
+    default:
+        g_assert_not_reached();
     }
 }
 
@@ -1777,7 +1761,8 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
                                           TCGReg datalo, TCGReg datahi,
                                           TCGReg addrlo)
 {
-    MemOp bswap = opc & MO_BSWAP;
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SSIZE) {
     case MO_UB:
@@ -1788,49 +1773,28 @@ static inline void tcg_out_qemu_ld_direct(TCGContext *s, MemOp opc,
         break;
     case MO_UW:
         tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
-        if (bswap) {
-            tcg_out_bswap16(s, COND_AL, datalo, datalo,
-                            TCG_BSWAP_IZ | TCG_BSWAP_OZ);
-        }
         break;
     case MO_SW:
-        if (bswap) {
-            tcg_out_ld16u_8(s, COND_AL, datalo, addrlo, 0);
-            tcg_out_bswap16(s, COND_AL, datalo, datalo,
-                            TCG_BSWAP_IZ | TCG_BSWAP_OS);
-        } else {
-            tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
-        }
+        tcg_out_ld16s_8(s, COND_AL, datalo, addrlo, 0);
         break;
     case MO_UL:
-    default:
         tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
-        if (bswap) {
-            tcg_out_bswap32(s, COND_AL, datalo, datalo);
-        }
         break;
     case MO_Q:
-        {
-            TCGReg dl = (bswap ? datahi : datalo);
-            TCGReg dh = (bswap ? datalo : datahi);
-
-            /* Avoid ldrd for user-only emulation, to handle unaligned. */
-            if (USING_SOFTMMU && use_armv6_instructions
-                && (dl & 1) == 0 && dh == dl + 1) {
-                tcg_out_ldrd_8(s, COND_AL, dl, addrlo, 0);
-            } else if (dl == addrlo) {
-                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
-                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
-            } else {
-                tcg_out_ld32_12(s, COND_AL, dl, addrlo, bswap ? 4 : 0);
-                tcg_out_ld32_12(s, COND_AL, dh, addrlo, bswap ? 0 : 4);
-            }
-            if (bswap) {
-                tcg_out_bswap32(s, COND_AL, dl, dl);
-                tcg_out_bswap32(s, COND_AL, dh, dh);
-            }
-        }
+        /* Avoid ldrd for user-only emulation, to handle unaligned. */
+        if (USING_SOFTMMU && use_armv6_instructions
+            && (datalo & 1) == 0 && datahi == datalo + 1) {
+            tcg_out_ldrd_8(s, COND_AL, datalo, addrlo, 0);
+        } else if (datalo == addrlo) {
+            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
+            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
+        } else {
+            tcg_out_ld32_12(s, COND_AL, datalo, addrlo, 0);
+            tcg_out_ld32_12(s, COND_AL, datahi, addrlo, 4);
+        }
         break;
+    default:
+        g_assert_not_reached();
     }
 }
 
@@ -1879,44 +1843,31 @@ static inline void tcg_out_qemu_st_index(TCGContext *s, int cond, MemOp opc,
                                          TCGReg datalo, TCGReg datahi,
                                          TCGReg addrlo, TCGReg addend)
 {
-    MemOp bswap = opc & MO_BSWAP;
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SIZE) {
     case MO_8:
         tcg_out_st8_r(s, cond, datalo, addrlo, addend);
         break;
     case MO_16:
-        if (bswap) {
-            tcg_out_bswap16(s, cond, TCG_REG_R0, datalo, 0);
-            tcg_out_st16_r(s, cond, TCG_REG_R0, addrlo, addend);
-        } else {
-            tcg_out_st16_r(s, cond, datalo, addrlo, addend);
-        }
+        tcg_out_st16_r(s, cond, datalo, addrlo, addend);
         break;
     case MO_32:
-    default:
-        if (bswap) {
-            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
-            tcg_out_st32_r(s, cond, TCG_REG_R0, addrlo, addend);
-        } else {
-            tcg_out_st32_r(s, cond, datalo, addrlo, addend);
-        }
+        tcg_out_st32_r(s, cond, datalo, addrlo, addend);
         break;
     case MO_64:
         /* Avoid strd for user-only emulation, to handle unaligned. */
-        if (bswap) {
-            tcg_out_bswap32(s, cond, TCG_REG_R0, datahi);
-            tcg_out_st32_rwb(s, cond, TCG_REG_R0, addend, addrlo);
-            tcg_out_bswap32(s, cond, TCG_REG_R0, datalo);
-            tcg_out_st32_12(s, cond, TCG_REG_R0, addend, 4);
-        } else if (USING_SOFTMMU && use_armv6_instructions
-                   && (datalo & 1) == 0 && datahi == datalo + 1) {
+        if (USING_SOFTMMU && use_armv6_instructions
+            && (datalo & 1) == 0 && datahi == datalo + 1) {
             tcg_out_strd_r(s, cond, datalo, addrlo, addend);
         } else {
             tcg_out_st32_rwb(s, cond, datalo, addend, addrlo);
             tcg_out_st32_12(s, cond, datahi, addend, 4);
         }
         break;
+    default:
+        g_assert_not_reached();
     }
 }
 
@@ -1924,44 +1875,31 @@ static inline void tcg_out_qemu_st_direct(TCGContext *s, MemOp opc,
                                           TCGReg datalo, TCGReg datahi,
                                           TCGReg addrlo)
 {
-    MemOp bswap = opc & MO_BSWAP;
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & MO_SIZE) {
     case MO_8:
         tcg_out_st8_12(s, COND_AL, datalo, addrlo, 0);
         break;
     case MO_16:
-        if (bswap) {
-            tcg_out_bswap16(s, COND_AL, TCG_REG_R0, datalo, 0);
-            tcg_out_st16_8(s, COND_AL, TCG_REG_R0, addrlo, 0);
-        } else {
-            tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
-        }
+        tcg_out_st16_8(s, COND_AL, datalo, addrlo, 0);
         break;
     case MO_32:
-    default:
-        if (bswap) {
-            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
-            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
-        } else {
-            tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
-        }
+        tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
         break;
     case MO_64:
         /* Avoid strd for user-only emulation, to handle unaligned. */
-        if (bswap) {
-            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datahi);
-            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 0);
-            tcg_out_bswap32(s, COND_AL, TCG_REG_R0, datalo);
-            tcg_out_st32_12(s, COND_AL, TCG_REG_R0, addrlo, 4);
-        } else if (USING_SOFTMMU && use_armv6_instructions
-                   && (datalo & 1) == 0 && datahi == datalo + 1) {
+        if (USING_SOFTMMU && use_armv6_instructions
+            && (datalo & 1) == 0 && datahi == datalo + 1) {
             tcg_out_strd_8(s, COND_AL, datalo, addrlo, 0);
         } else {
             tcg_out_st32_12(s, COND_AL, datalo, addrlo, 0);
             tcg_out_st32_12(s, COND_AL, datahi, addrlo, 4);
         }
         break;
+    default:
+        g_assert_not_reached();
     }
 }
 
tcg/arm/tcg-target.h
@@ -174,7 +174,7 @@ extern bool use_neon_instructions;
 #define TCG_TARGET_HAS_cmpsel_vec 0
 
 #define TCG_TARGET_DEFAULT_MO (0)
-#define TCG_TARGET_HAS_MEMORY_BSWAP 1
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
 
 /* not defined -- call should be eliminated at compile time */
 void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);