Mirror of https://github.com/qemu/qemu.git
target-sh4: use bit number for SR constants
Use the bit number for SR constants instead of using a bit mask. This makes it possible to also use the constants for shifts.

Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
This commit is contained in:
parent 563807520f
commit 5ed9a259c1
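As a quick illustration of the rationale above, here is a minimal standalone C sketch (not part of the patch; the SR_MD and SR_I0 values mirror the new cpu.h, but the helper functions and main() are invented for this example). A bit-number constant can be turned into a test mask with a shift and can also be reused directly as a shift count, e.g. when extracting the IMASK field:

#include <stdint.h>
#include <stdio.h>

#define SR_MD 30   /* processor mode bit, as in the new definitions below */
#define SR_I0 4    /* lowest bit of the 4-bit interrupt mask (IMASK) */

/* Hypothetical helpers, for illustration only. */
static int sr_is_privileged(uint32_t sr)
{
    return (sr & (1u << SR_MD)) != 0;   /* bit number -> mask via a shift */
}

static unsigned sr_imask(uint32_t sr)
{
    return (sr >> SR_I0) & 0xf;         /* same constant reused as a shift count */
}

int main(void)
{
    uint32_t sr = (1u << SR_MD) | (0x7u << SR_I0);
    printf("privileged=%d imask=%u\n", sr_is_privileged(sr), sr_imask(sr));
    return 0;
}

With mask-style constants such as (1 << 30), the flag test still works, but the IMASK extraction would need a separately maintained shift count.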
@@ -61,7 +61,8 @@ static void superh_cpu_reset(CPUState *s)
     env->fpscr = FPSCR_PR; /* value for userspace according to the kernel */
     set_float_rounding_mode(float_round_nearest_even, &env->fp_status); /* ?! */
 #else
-    env->sr = SR_MD | SR_RB | SR_BL | SR_I3 | SR_I2 | SR_I1 | SR_I0;
+    env->sr = (1u << SR_MD) | (1u << SR_RB) | (1u << SR_BL) |
+              (1u << SR_I3) | (1u << SR_I2) | (1u << SR_I1) | (1u << SR_I0);
     env->fpscr = FPSCR_DN | FPSCR_RM_ZERO; /* CPU reset value according to SH4 manual */
     set_float_rounding_mode(float_round_to_zero, &env->fp_status);
     set_flush_to_zero(1, &env->fp_status);
@@ -47,18 +47,18 @@
 #define TARGET_PHYS_ADDR_SPACE_BITS 32
 #define TARGET_VIRT_ADDR_SPACE_BITS 32
 
-#define SR_MD (1 << 30)
-#define SR_RB (1 << 29)
-#define SR_BL (1 << 28)
-#define SR_FD (1 << 15)
-#define SR_M (1 << 9)
-#define SR_Q (1 << 8)
-#define SR_I3 (1 << 7)
-#define SR_I2 (1 << 6)
-#define SR_I1 (1 << 5)
-#define SR_I0 (1 << 4)
-#define SR_S (1 << 1)
-#define SR_T (1 << 0)
+#define SR_MD 30
+#define SR_RB 29
+#define SR_BL 28
+#define SR_FD 15
+#define SR_M 9
+#define SR_Q 8
+#define SR_I3 7
+#define SR_I2 6
+#define SR_I1 5
+#define SR_I0 4
+#define SR_S 1
+#define SR_T 0
 
 #define FPSCR_MASK (0x003fffff)
 #define FPSCR_FR (1 << 21)

@@ -234,7 +234,7 @@ void cpu_load_tlb(CPUSH4State * env);
 #define MMU_USER_IDX 1
 static inline int cpu_mmu_index (CPUSH4State *env)
 {
-    return (env->sr & SR_MD) == 0 ? 1 : 0;
+    return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
 }
 
 #include "exec/cpu-all.h"

@@ -339,8 +339,8 @@ static inline void cpu_get_tb_cpu_state(CPUSH4State *env, target_ulong *pc,
     *flags = (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL
                     | DELAY_SLOT_TRUE | DELAY_SLOT_CLEARME))    /* Bits  0- 3 */
             | (env->fpscr & (FPSCR_FR | FPSCR_SZ | FPSCR_PR))   /* Bits 19-21 */
-            | (env->sr & (SR_MD | SR_RB))                       /* Bits 29-30 */
-            | (env->sr & SR_FD)                                 /* Bit 15 */
+            | (env->sr & ((1u << SR_MD) | (1u << SR_RB)))       /* Bits 29-30 */
+            | (env->sr & (1u << SR_FD))                         /* Bit 15 */
            | (env->movcal_backup ? TB_FLAG_PENDING_MOVCA : 0); /* Bit 4 */
 }
@@ -31,7 +31,7 @@ int superh_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
 
     switch (n) {
     case 0 ... 7:
-        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
+        if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
             return gdb_get_regl(mem_buf, env->gregs[n + 16]);
         } else {
             return gdb_get_regl(mem_buf, env->gregs[n]);

@@ -83,7 +83,7 @@ int superh_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
 
     switch (n) {
     case 0 ... 7:
-        if ((env->sr & (SR_MD | SR_RB)) == (SR_MD | SR_RB)) {
+        if ((env->sr & (1u << SR_MD)) && (env->sr & (1u << SR_RB))) {
             env->gregs[n + 16] = ldl_p(mem_buf);
         } else {
             env->gregs[n] = ldl_p(mem_buf);
@@ -93,7 +93,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
     do_exp = cs->exception_index != -1;
     do_irq = do_irq && (cs->exception_index == -1);
 
-    if (env->sr & SR_BL) {
+    if (env->sr & (1u << SR_BL)) {
         if (do_exp && cs->exception_index != 0x1e0) {
             cs->exception_index = 0x000; /* masked exception -> reset */
         }

@@ -165,7 +165,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
     env->ssr = env->sr;
     env->spc = env->pc;
     env->sgr = env->gregs[15];
-    env->sr |= SR_BL | SR_MD | SR_RB;
+    env->sr |= (1u << SR_BL) | (1u << SR_MD) | (1u << SR_RB);
 
     if (env->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) {
         /* Branch instruction should be executed again before delay slot. */

@@ -182,7 +182,7 @@ void superh_cpu_do_interrupt(CPUState *cs)
         case 0x000:
         case 0x020:
         case 0x140:
-            env->sr &= ~SR_FD;
+            env->sr &= ~(1u << SR_FD);
             env->sr |= 0xf << 4; /* IMASK */
             env->pc = 0xa0000000;
             break;

@@ -355,23 +355,24 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
     int use_asid, n;
     tlb_t *matching = NULL;
 
-    use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;
+    use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
 
     if (rw == 2) {
         n = find_itlb_entry(env, address, use_asid);
         if (n >= 0) {
             matching = &env->itlb[n];
-            if (!(env->sr & SR_MD) && !(matching->pr & 2))
+            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                 n = MMU_ITLB_VIOLATION;
-            else
+            } else {
                 *prot = PAGE_EXEC;
+            }
         } else {
             n = find_utlb_entry(env, address, use_asid);
             if (n >= 0) {
                 n = copy_utlb_entry_itlb(env, n);
                 matching = &env->itlb[n];
-                if (!(env->sr & SR_MD) && !(matching->pr & 2)) {
-                      n = MMU_ITLB_VIOLATION;
+                if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
+                    n = MMU_ITLB_VIOLATION;
                 } else {
                     *prot = PAGE_READ | PAGE_EXEC;
                     if ((matching->pr & 1) && matching->d) {

@@ -388,7 +389,7 @@ static int get_mmu_address(CPUSH4State * env, target_ulong * physical,
         n = find_utlb_entry(env, address, use_asid);
         if (n >= 0) {
             matching = &env->utlb[n];
-            if (!(env->sr & SR_MD) && !(matching->pr & 2)) {
+            if (!(env->sr & (1u << SR_MD)) && !(matching->pr & 2)) {
                 n = (rw == 1) ? MMU_DTLB_VIOLATION_WRITE :
                     MMU_DTLB_VIOLATION_READ;
             } else if ((rw == 1) && !(matching->pr & 1)) {

@@ -421,7 +422,7 @@ static int get_physical_address(CPUSH4State * env, target_ulong * physical,
     /* P1, P2 and P4 areas do not use translation */
     if ((address >= 0x80000000 && address < 0xc0000000) ||
         address >= 0xe0000000) {
-        if (!(env->sr & SR_MD)
+        if (!(env->sr & (1u << SR_MD))
             && (address < 0xe0000000 || address >= 0xe4000000)) {
             /* Unauthorized access in user mode (only store queues are available) */
             fprintf(stderr, "Unauthorized access\n");

@@ -690,7 +691,7 @@ void cpu_sh4_write_mmaped_utlb_addr(CPUSH4State *s, hwaddr addr,
     uint8_t d = (uint8_t)((mem_value & 0x00000200) >> 9);
     uint8_t v = (uint8_t)((mem_value & 0x00000100) >> 8);
     uint8_t asid = (uint8_t)(mem_value & 0x000000ff);
-    int use_asid = (s->mmucr & MMUCR_SV) == 0 || (s->sr & SR_MD) == 0;
+    int use_asid = !(s->mmucr & MMUCR_SV) || !(s->sr & (1u << SR_MD));
 
     if (associate) {
         int i;

@@ -821,10 +822,10 @@ void cpu_sh4_write_mmaped_utlb_data(CPUSH4State *s, hwaddr addr,
 int cpu_sh4_is_cached(CPUSH4State * env, target_ulong addr)
 {
     int n;
-    int use_asid = (env->mmucr & MMUCR_SV) == 0 || (env->sr & SR_MD) == 0;
+    int use_asid = !(env->mmucr & MMUCR_SV) || !(env->sr & (1u << SR_MD));
 
     /* check area */
-    if (env->sr & SR_MD) {
+    if (env->sr & (1u << SR_MD)) {
         /* For previledged mode, P2 and P4 area is not cachable. */
         if ((0xA0000000 <= addr && addr < 0xC0000000) || 0xE0000000 <= addr)
             return 0;
@@ -156,15 +156,15 @@ void helper_ocbi(CPUSH4State *env, uint32_t address)
     }
 }
 
-#define T (env->sr & SR_T)
-#define Q (env->sr & SR_Q ? 1 : 0)
-#define M (env->sr & SR_M ? 1 : 0)
-#define SETT env->sr |= SR_T
-#define CLRT env->sr &= ~SR_T
-#define SETQ env->sr |= SR_Q
-#define CLRQ env->sr &= ~SR_Q
-#define SETM env->sr |= SR_M
-#define CLRM env->sr &= ~SR_M
+#define T (env->sr & (1u << SR_T))
+#define Q (env->sr & (1u << SR_Q) ? 1 : 0)
+#define M (env->sr & (1u << SR_M) ? 1 : 0)
+#define SETT (env->sr |= (1u << SR_T))
+#define CLRT (env->sr &= ~(1u << SR_T))
+#define SETQ (env->sr |= (1u << SR_Q))
+#define CLRQ (env->sr &= ~(1u << SR_Q))
+#define SETM (env->sr |= (1u << SR_M))
+#define CLRM (env->sr &= ~(1u << SR_M))
 
 uint32_t helper_div1(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
 {

@@ -282,7 +282,7 @@ void helper_macl(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
     res += (int64_t) (int32_t) arg0 *(int64_t) (int32_t) arg1;
     env->mach = (res >> 32) & 0xffffffff;
     env->macl = res & 0xffffffff;
-    if (env->sr & SR_S) {
+    if (env->sr & (1u << SR_S)) {
         if (res < 0)
             env->mach |= 0xffff0000;
         else

@@ -298,7 +298,7 @@ void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
     res += (int64_t) (int16_t) arg0 *(int64_t) (int16_t) arg1;
     env->mach = (res >> 32) & 0xffffffff;
     env->macl = res & 0xffffffff;
-    if (env->sr & SR_S) {
+    if (env->sr & (1u << SR_S)) {
         if (res < -0x80000000) {
             env->mach = 1;
             env->macl = 0x80000000;

@@ -311,12 +311,12 @@ void helper_macw(CPUSH4State *env, uint32_t arg0, uint32_t arg1)
 
 static inline void set_t(CPUSH4State *env)
 {
-    env->sr |= SR_T;
+    env->sr |= (1u << SR_T);
 }
 
 static inline void clr_t(CPUSH4State *env)
 {
-    env->sr &= ~SR_T;
+    env->sr &= ~(1u << SR_T);
 }
 
 void helper_ld_fpscr(CPUSH4State *env, uint32_t val)
@@ -47,7 +47,7 @@ typedef struct DisasContext {
 #if defined(CONFIG_USER_ONLY)
 #define IS_USER(ctx) 1
 #else
-#define IS_USER(ctx) (!(ctx->flags & SR_MD))
+#define IS_USER(ctx) (!(ctx->flags & (1u << SR_MD)))
 #endif
 
 enum {

@@ -214,7 +214,7 @@ static inline void gen_branch_slot(uint32_t delayed_pc, int t)
     TCGLabel *label = gen_new_label();
     tcg_gen_movi_i32(cpu_delayed_pc, delayed_pc);
     sr = tcg_temp_new();
-    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
+    tcg_gen_andi_i32(sr, cpu_sr, (1u << SR_T));
     tcg_gen_brcondi_i32(t ? TCG_COND_EQ:TCG_COND_NE, sr, 0, label);
     tcg_gen_ori_i32(cpu_flags, cpu_flags, DELAY_SLOT_TRUE);
     gen_set_label(label);

@@ -229,7 +229,7 @@ static void gen_conditional_jump(DisasContext * ctx,
 
     l1 = gen_new_label();
     sr = tcg_temp_new();
-    tcg_gen_andi_i32(sr, cpu_sr, SR_T);
+    tcg_gen_andi_i32(sr, cpu_sr, (1u << SR_T));
     tcg_gen_brcondi_i32(TCG_COND_NE, sr, 0, l1);
     gen_goto_tb(ctx, 0, ifnott);
     gen_set_label(l1);

@@ -258,7 +258,7 @@ static inline void gen_cmp(int cond, TCGv t0, TCGv t1)
 
     t = tcg_temp_new();
     tcg_gen_setcond_i32(cond, t, t1, t0);
-    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
     tcg_gen_or_i32(cpu_sr, cpu_sr, t);
 
     tcg_temp_free(t);

@@ -270,7 +270,7 @@ static inline void gen_cmp_imm(int cond, TCGv t0, int32_t imm)
 
     t = tcg_temp_new();
     tcg_gen_setcondi_i32(cond, t, t0, imm);
-    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+    tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
     tcg_gen_or_i32(cpu_sr, cpu_sr, t);
 
     tcg_temp_free(t);

@@ -326,10 +326,12 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 #define B11_8 ((ctx->opcode >> 8) & 0xf)
 #define B15_12 ((ctx->opcode >> 12) & 0xf)
 
-#define REG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) == (SR_MD | SR_RB) \
+#define REG(x) ((x) < 8 && (ctx->flags & (1u << SR_MD))\
+                && (ctx->flags & (1u << SR_RB))\
                 ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
 
-#define ALTREG(x) ((x) < 8 && (ctx->flags & (SR_MD | SR_RB)) != (SR_MD | SR_RB)\
+#define ALTREG(x) ((x) < 8 && (!(ctx->flags & (1u << SR_MD))\
+                   || !(ctx->flags & (1u << SR_RB)))\
                   ? (cpu_gregs[x + 16]) : (cpu_gregs[x]))
 
 #define FREG(x) (ctx->flags & FPSCR_FR ? (x) ^ 0x10 : (x))

@@ -359,7 +361,7 @@ static inline void gen_store_fpr64 (TCGv_i64 t, int reg)
 }
 
 #define CHECK_FPU_ENABLED \
-    if (ctx->flags & SR_FD) { \
+    if (ctx->flags & (1u << SR_FD)) { \
         tcg_gen_movi_i32(cpu_pc, ctx->pc); \
         if (ctx->flags & (DELAY_SLOT | DELAY_SLOT_CONDITIONAL)) { \
             gen_helper_raise_slot_fpu_disable(cpu_env); \

@@ -409,7 +411,8 @@ static void _decode_opc(DisasContext * ctx)
 
     switch (ctx->opcode) {
     case 0x0019: /* div0u */
-        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(SR_M | SR_Q | SR_T));
+        tcg_gen_andi_i32(cpu_sr, cpu_sr,
+                         ~((1u << SR_M) | (1u << SR_Q) | (1u << SR_T)));
         return;
     case 0x000b: /* rts */
         CHECK_NOT_DELAY_SLOT

@@ -422,10 +425,10 @@ static void _decode_opc(DisasContext * ctx)
         tcg_gen_movi_i32(cpu_macl, 0);
         return;
     case 0x0048: /* clrs */
-        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_S);
+        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_S));
         return;
     case 0x0008: /* clrt */
-        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+        tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
         return;
     case 0x0038: /* ldtlb */
         CHECK_PRIVILEGED

@@ -440,10 +443,10 @@ static void _decode_opc(DisasContext * ctx)
         ctx->delayed_pc = (uint32_t) - 1;
         return;
     case 0x0058: /* sets */
-        tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_S);
+        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_S));
         return;
     case 0x0018: /* sett */
-        tcg_gen_ori_i32(cpu_sr, cpu_sr, SR_T);
+        tcg_gen_ori_i32(cpu_sr, cpu_sr, (1u << SR_T));
         return;
     case 0xfbfd: /* frchg */
         tcg_gen_xori_i32(cpu_fpscr, cpu_fpscr, FPSCR_FR);

@@ -661,7 +664,7 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv t0, t1, t2;
             t0 = tcg_temp_new();
-            tcg_gen_andi_i32(t0, cpu_sr, SR_T);
+            tcg_gen_andi_i32(t0, cpu_sr, (1u << SR_T));
             t1 = tcg_temp_new();
             tcg_gen_add_i32(t1, REG(B7_4), REG(B11_8));
             tcg_gen_add_i32(t0, t0, t1);

@@ -670,7 +673,7 @@ static void _decode_opc(DisasContext * ctx)
             tcg_gen_setcond_i32(TCG_COND_GTU, t1, t1, t0);
             tcg_gen_or_i32(t1, t1, t2);
             tcg_temp_free(t2);
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
             tcg_temp_free(t1);
             tcg_gen_mov_i32(REG(B11_8), t0);

@@ -689,7 +692,7 @@ static void _decode_opc(DisasContext * ctx)
             tcg_gen_andc_i32(t1, t1, t2);
             tcg_temp_free(t2);
             tcg_gen_shri_i32(t1, t1, 31);
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
             tcg_temp_free(t1);
             tcg_gen_mov_i32(REG(B7_4), t0);

@@ -718,7 +721,7 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv cmp1 = tcg_temp_new();
             TCGv cmp2 = tcg_temp_new();
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_xor_i32(cmp1, REG(B7_4), REG(B11_8));
             tcg_gen_andi_i32(cmp2, cmp1, 0xff000000);
             tcg_gen_setcondi_i32(TCG_COND_EQ, cmp2, cmp2, 0);

@@ -738,11 +741,11 @@ static void _decode_opc(DisasContext * ctx)
         return;
     case 0x2007: /* div0s Rm,Rn */
         {
-            gen_copy_bit_i32(cpu_sr, 8, REG(B11_8), 31);    /* SR_Q */
-            gen_copy_bit_i32(cpu_sr, 9, REG(B7_4), 31);     /* SR_M */
+            gen_copy_bit_i32(cpu_sr, SR_Q, REG(B11_8), 31); /* SR_Q */
+            gen_copy_bit_i32(cpu_sr, SR_M, REG(B7_4), 31);  /* SR_M */
             TCGv val = tcg_temp_new();
             tcg_gen_xor_i32(val, REG(B7_4), REG(B11_8));
-            gen_copy_bit_i32(cpu_sr, 0, val, 31);           /* SR_T */
+            gen_copy_bit_i32(cpu_sr, SR_T, val, 31);        /* SR_T */
             tcg_temp_free(val);
         }
         return;

@@ -831,9 +834,9 @@ static void _decode_opc(DisasContext * ctx)
             t0 = tcg_temp_new();
             tcg_gen_neg_i32(t0, REG(B7_4));
             t1 = tcg_temp_new();
-            tcg_gen_andi_i32(t1, cpu_sr, SR_T);
+            tcg_gen_andi_i32(t1, cpu_sr, (1u << SR_T));
             tcg_gen_sub_i32(REG(B11_8), t0, t1);
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_setcondi_i32(TCG_COND_GTU, t1, t0, 0);
             tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
             tcg_gen_setcond_i32(TCG_COND_GTU, t1, REG(B11_8), t0);

@@ -920,7 +923,7 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv t0, t1, t2;
             t0 = tcg_temp_new();
-            tcg_gen_andi_i32(t0, cpu_sr, SR_T);
+            tcg_gen_andi_i32(t0, cpu_sr, (1u << SR_T));
             t1 = tcg_temp_new();
             tcg_gen_sub_i32(t1, REG(B11_8), REG(B7_4));
             tcg_gen_sub_i32(t0, t1, t0);

@@ -929,7 +932,7 @@ static void _decode_opc(DisasContext * ctx)
             tcg_gen_setcond_i32(TCG_COND_LTU, t1, t1, t0);
             tcg_gen_or_i32(t1, t1, t2);
             tcg_temp_free(t2);
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
             tcg_temp_free(t1);
             tcg_gen_mov_i32(REG(B11_8), t0);

@@ -948,7 +951,7 @@ static void _decode_opc(DisasContext * ctx)
             tcg_gen_and_i32(t1, t1, t2);
             tcg_temp_free(t2);
             tcg_gen_shri_i32(t1, t1, 31);
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_or_i32(cpu_sr, cpu_sr, t1);
             tcg_temp_free(t1);
             tcg_gen_mov_i32(REG(B11_8), t0);

@@ -1545,7 +1548,7 @@ static void _decode_opc(DisasContext * ctx)
         tcg_gen_addi_i32(REG(B11_8), REG(B11_8), 4);
         return;
    case 0x0029: /* movt Rn */
-        tcg_gen_andi_i32(REG(B11_8), cpu_sr, SR_T);
+        tcg_gen_andi_i32(REG(B11_8), cpu_sr, (1u << SR_T));
         return;
     case 0x0073:
         /* MOVCO.L

@@ -1555,7 +1558,7 @@ static void _decode_opc(DisasContext * ctx)
          */
         if (ctx->features & SH_FEATURE_SH4A) {
             TCGLabel *label = gen_new_label();
-            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~SR_T);
+            tcg_gen_andi_i32(cpu_sr, cpu_sr, ~(1u << SR_T));
             tcg_gen_or_i32(cpu_sr, cpu_sr, cpu_ldst);
             tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ldst, 0, label);
             tcg_gen_qemu_st_i32(REG(0), REG(B11_8), ctx->memidx, MO_TEUL);

@@ -1610,9 +1613,9 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv tmp = tcg_temp_new();
             tcg_gen_mov_i32(tmp, cpu_sr);
-            gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
+            gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 31);
             tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
-            gen_copy_bit_i32(REG(B11_8), 0, tmp, 0);
+            gen_copy_bit_i32(REG(B11_8), SR_T, tmp, 0);
             tcg_temp_free(tmp);
         }
         return;

@@ -1620,7 +1623,7 @@ static void _decode_opc(DisasContext * ctx)
         {
             TCGv tmp = tcg_temp_new();
             tcg_gen_mov_i32(tmp, cpu_sr);
-            gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
+            gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 0);
             tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
             gen_copy_bit_i32(REG(B11_8), 31, tmp, 0);
             tcg_temp_free(tmp);

@@ -1628,23 +1631,23 @@ static void _decode_opc(DisasContext * ctx)
         return;
     case 0x4004: /* rotl Rn */
         tcg_gen_rotli_i32(REG(B11_8), REG(B11_8), 1);
-        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
+        gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 0);
         return;
     case 0x4005: /* rotr Rn */
-        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
+        gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 0);
         tcg_gen_rotri_i32(REG(B11_8), REG(B11_8), 1);
         return;
     case 0x4000: /* shll Rn */
     case 0x4020: /* shal Rn */
-        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 31);
+        gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 31);
         tcg_gen_shli_i32(REG(B11_8), REG(B11_8), 1);
         return;
     case 0x4021: /* shar Rn */
-        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
+        gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 0);
         tcg_gen_sari_i32(REG(B11_8), REG(B11_8), 1);
         return;
     case 0x4001: /* shlr Rn */
-        gen_copy_bit_i32(cpu_sr, 0, REG(B11_8), 0);
+        gen_copy_bit_i32(cpu_sr, SR_T, REG(B11_8), 0);
         tcg_gen_shri_i32(REG(B11_8), REG(B11_8), 1);
         return;
     case 0x4008: /* shll2 Rn */

@@ -1874,7 +1877,7 @@ gen_intermediate_code_internal(SuperHCPU *cpu, TranslationBlock *tb,
     ctx.pc = pc_start;
     ctx.flags = (uint32_t)tb->flags;
     ctx.bstate = BS_NONE;
-    ctx.memidx = (ctx.flags & SR_MD) == 0 ? 1 : 0;
+    ctx.memidx = (ctx.flags & (1u << SR_MD)) == 0 ? 1 : 0;
     /* We don't know if the delayed pc came from a dynamic or static branch,
        so assume it is a dynamic branch. */
     ctx.delayed_pc = -1; /* use delayed pc from env pointer */