mirror of
https://github.com/qemu/qemu.git
synced 2024-12-12 21:23:36 +08:00
target/i386: move 00-5F opcodes to new decoder
Reviewed-by: Richard Henderson <richard.henderson@linaro.org> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
This commit is contained in:
parent
445457693c
commit
cc1d28bdbe
@ -121,6 +121,8 @@
|
||||
|
||||
/*
 * Two-operand group: operand 0 is read-modify-write ("2op" repeats it
 * in the second source slot), operand 1 is the other source.
 */
#define X86_OP_GROUP2(op, op0, s0, op1, s1, ...) \
    X86_OP_GROUP3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
/* Group with a single operand, placed in the (written) slot 0. */
#define X86_OP_GROUPw(op, op0, s0, ...) \
    X86_OP_GROUP3(op, op0, s0, None, None, None, None, ## __VA_ARGS__)
/* Group with no explicit operands. */
#define X86_OP_GROUP0(op, ...) \
    X86_OP_GROUP3(op, None, None, None, None, None, None, ## __VA_ARGS__)
|
||||
|
||||
@ -140,12 +142,23 @@
|
||||
.op3 = X86_TYPE_I, .s3 = X86_SIZE_b, \
|
||||
## __VA_ARGS__)
|
||||
|
||||
/*
 * Short forms that are mostly useful for ALU opcodes and other
 * one-byte opcodes.  For vector instructions it is usually
 * clearer to write all three operands explicitly, because the
 * corresponding gen_* function will use OP_PTRn rather than s->T0
 * and s->T1.
 */
/* Two read-only operands (slot 0 empty): nothing is written back, e.g. CMP. */
#define X86_OP_ENTRYrr(op, op0, s0, op1, s1, ...) \
    X86_OP_ENTRY3(op, None, None, op0, s0, op1, s1, ## __VA_ARGS__)
/* Read-modify-write destination ("2op" aliases op0) plus one source. */
#define X86_OP_ENTRY2(op, op0, s0, op1, s1, ...) \
    X86_OP_ENTRY3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
/* Single write-only operand in slot 0, e.g. POP. */
#define X86_OP_ENTRYw(op, op0, s0, ...) \
    X86_OP_ENTRY3(op, op0, s0, None, None, None, None, ## __VA_ARGS__)
/* Single read-only operand in slot 2, e.g. PUSH. */
#define X86_OP_ENTRYr(op, op0, s0, ...) \
    X86_OP_ENTRY3(op, None, None, None, None, op0, s0, ## __VA_ARGS__)
/* Single read-modify-write operand, e.g. INC/DEC. */
#define X86_OP_ENTRY1(op, op0, s0, ...) \
    X86_OP_ENTRY3(op, op0, s0, 2op, s0, None, None, ## __VA_ARGS__)
/* No explicit operands, e.g. DAA. */
#define X86_OP_ENTRY0(op, ...) \
    X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
|
||||
|
||||
@ -1096,7 +1109,114 @@ static void decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint
|
||||
}
|
||||
|
||||
/*
 * Root table for one-byte opcodes.  Only 00-5F are populated so far;
 * other bytes still take the legacy decoder path.  Entries are laid
 * out in the two columns of the classic opcode map: rows x0-x7 first,
 * then rows x8-xF.
 *
 * chk(i64) marks instructions that are invalid in 64-bit mode;
 * d64 marks PUSH/POP defaulting to 64-bit operand size in long mode.
 * NOTE(review): assumed from the flag names — confirm against the
 * decoder's flag handling, which is outside this view.
 */
static const X86OpEntry opcodes_root[256] = {
    [0x00] = X86_OP_ENTRY2(ADD, E,b, G,b, lock),
    [0x01] = X86_OP_ENTRY2(ADD, E,v, G,v, lock),
    [0x02] = X86_OP_ENTRY2(ADD, G,b, E,b, lock),
    [0x03] = X86_OP_ENTRY2(ADD, G,v, E,v, lock),
    [0x04] = X86_OP_ENTRY2(ADD, 0,b, I,b, lock), /* AL, Ib */
    [0x05] = X86_OP_ENTRY2(ADD, 0,v, I,z, lock), /* rAX, Iz */
    [0x06] = X86_OP_ENTRYr(PUSH, ES, w, chk(i64)),
    [0x07] = X86_OP_ENTRYw(POP, ES, w, chk(i64)),

    [0x10] = X86_OP_ENTRY2(ADC, E,b, G,b, lock),
    [0x11] = X86_OP_ENTRY2(ADC, E,v, G,v, lock),
    [0x12] = X86_OP_ENTRY2(ADC, G,b, E,b, lock),
    [0x13] = X86_OP_ENTRY2(ADC, G,v, E,v, lock),
    [0x14] = X86_OP_ENTRY2(ADC, 0,b, I,b, lock), /* AL, Ib */
    [0x15] = X86_OP_ENTRY2(ADC, 0,v, I,z, lock), /* rAX, Iz */
    [0x16] = X86_OP_ENTRYr(PUSH, SS, w, chk(i64)),
    [0x17] = X86_OP_ENTRYw(POP, SS, w, chk(i64)),

    [0x20] = X86_OP_ENTRY2(AND, E,b, G,b, lock),
    [0x21] = X86_OP_ENTRY2(AND, E,v, G,v, lock),
    [0x22] = X86_OP_ENTRY2(AND, G,b, E,b, lock),
    [0x23] = X86_OP_ENTRY2(AND, G,v, E,v, lock),
    [0x24] = X86_OP_ENTRY2(AND, 0,b, I,b, lock), /* AL, Ib */
    [0x25] = X86_OP_ENTRY2(AND, 0,v, I,z, lock), /* rAX, Iz */
    [0x26] = {},                                 /* ES segment override prefix */
    [0x27] = X86_OP_ENTRY0(DAA, chk(i64)),

    [0x30] = X86_OP_ENTRY2(XOR, E,b, G,b, lock),
    [0x31] = X86_OP_ENTRY2(XOR, E,v, G,v, lock),
    [0x32] = X86_OP_ENTRY2(XOR, G,b, E,b, lock),
    [0x33] = X86_OP_ENTRY2(XOR, G,v, E,v, lock),
    [0x34] = X86_OP_ENTRY2(XOR, 0,b, I,b, lock), /* AL, Ib */
    [0x35] = X86_OP_ENTRY2(XOR, 0,v, I,z, lock), /* rAX, Iz */
    [0x36] = {},                                 /* SS segment override prefix */
    [0x37] = X86_OP_ENTRY0(AAA, chk(i64)),

    [0x40] = X86_OP_ENTRY1(INC, 0,v, chk(i64)),
    [0x41] = X86_OP_ENTRY1(INC, 1,v, chk(i64)),
    [0x42] = X86_OP_ENTRY1(INC, 2,v, chk(i64)),
    [0x43] = X86_OP_ENTRY1(INC, 3,v, chk(i64)),
    [0x44] = X86_OP_ENTRY1(INC, 4,v, chk(i64)),
    [0x45] = X86_OP_ENTRY1(INC, 5,v, chk(i64)),
    [0x46] = X86_OP_ENTRY1(INC, 6,v, chk(i64)),
    [0x47] = X86_OP_ENTRY1(INC, 7,v, chk(i64)),

    [0x50] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x51] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x52] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x53] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x54] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x55] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x56] = X86_OP_ENTRYr(PUSH, LoBits,d64),
    [0x57] = X86_OP_ENTRYr(PUSH, LoBits,d64),


    [0x08] = X86_OP_ENTRY2(OR, E,b, G,b, lock),
    [0x09] = X86_OP_ENTRY2(OR, E,v, G,v, lock),
    [0x0A] = X86_OP_ENTRY2(OR, G,b, E,b, lock),
    [0x0B] = X86_OP_ENTRY2(OR, G,v, E,v, lock),
    [0x0C] = X86_OP_ENTRY2(OR, 0,b, I,b, lock), /* AL, Ib */
    [0x0D] = X86_OP_ENTRY2(OR, 0,v, I,z, lock), /* rAX, Iz */
    [0x0E] = X86_OP_ENTRYr(PUSH, CS, w, chk(i64)),
    [0x0F] = X86_OP_GROUP0(0F),                 /* two-byte opcode escape */

    [0x18] = X86_OP_ENTRY2(SBB, E,b, G,b, lock),
    [0x19] = X86_OP_ENTRY2(SBB, E,v, G,v, lock),
    [0x1A] = X86_OP_ENTRY2(SBB, G,b, E,b, lock),
    [0x1B] = X86_OP_ENTRY2(SBB, G,v, E,v, lock),
    [0x1C] = X86_OP_ENTRY2(SBB, 0,b, I,b, lock), /* AL, Ib */
    [0x1D] = X86_OP_ENTRY2(SBB, 0,v, I,z, lock), /* rAX, Iz */
    [0x1E] = X86_OP_ENTRYr(PUSH, DS, w, chk(i64)),
    [0x1F] = X86_OP_ENTRYw(POP, DS, w, chk(i64)),

    [0x28] = X86_OP_ENTRY2(SUB, E,b, G,b, lock),
    [0x29] = X86_OP_ENTRY2(SUB, E,v, G,v, lock),
    [0x2A] = X86_OP_ENTRY2(SUB, G,b, E,b, lock),
    [0x2B] = X86_OP_ENTRY2(SUB, G,v, E,v, lock),
    [0x2C] = X86_OP_ENTRY2(SUB, 0,b, I,b, lock), /* AL, Ib */
    [0x2D] = X86_OP_ENTRY2(SUB, 0,v, I,z, lock), /* rAX, Iz */
    [0x2E] = {},                                 /* CS segment override prefix */
    [0x2F] = X86_OP_ENTRY0(DAS, chk(i64)),

    /* 0x38-0x3D are CMP: SUB with ENTRYrr, i.e. no writeback of the result. */
    [0x38] = X86_OP_ENTRYrr(SUB, E,b, G,b),
    [0x39] = X86_OP_ENTRYrr(SUB, E,v, G,v),
    [0x3A] = X86_OP_ENTRYrr(SUB, G,b, E,b),
    [0x3B] = X86_OP_ENTRYrr(SUB, G,v, E,v),
    [0x3C] = X86_OP_ENTRYrr(SUB, 0,b, I,b), /* AL, Ib */
    [0x3D] = X86_OP_ENTRYrr(SUB, 0,v, I,z), /* rAX, Iz */
    [0x3E] = {},                            /* DS segment override prefix */
    [0x3F] = X86_OP_ENTRY0(AAS, chk(i64)),

    [0x48] = X86_OP_ENTRY1(DEC, 0,v, chk(i64)),
    [0x49] = X86_OP_ENTRY1(DEC, 1,v, chk(i64)),
    [0x4A] = X86_OP_ENTRY1(DEC, 2,v, chk(i64)),
    [0x4B] = X86_OP_ENTRY1(DEC, 3,v, chk(i64)),
    [0x4C] = X86_OP_ENTRY1(DEC, 4,v, chk(i64)),
    [0x4D] = X86_OP_ENTRY1(DEC, 5,v, chk(i64)),
    [0x4E] = X86_OP_ENTRY1(DEC, 6,v, chk(i64)),
    [0x4F] = X86_OP_ENTRY1(DEC, 7,v, chk(i64)),

    [0x58] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x59] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5A] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5B] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5C] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5D] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5E] = X86_OP_ENTRYw(POP, LoBits,d64),
    [0x5F] = X86_OP_ENTRYw(POP, LoBits,d64),
};
|
||||
|
||||
#undef mmx
|
||||
|
@ -352,6 +352,20 @@ static void prepare_update2_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op)
|
||||
decode->cc_op = op;
|
||||
}
|
||||
|
||||
/*
 * Flag setup for INC/DEC: the current carry is first computed into
 * s->T1, then recorded together with the result via the deferred
 * cc_op; this lets the CC_OP_INC*/CC_OP_DEC* machinery preserve CF
 * across the operation.
 */
static void prepare_update_cc_incdec(X86DecodedInsn *decode, DisasContext *s, CCOp op)
{
    /* Save the pre-operation carry before cc_src is captured below. */
    gen_compute_eflags_c(s, s->T1);
    prepare_update2_cc(decode, s, op);
}
|
||||
|
||||
/*
 * Record a deferred three-input flag update: @reg (e.g. the carry-in
 * of ADC/SBB) goes in cc_src2, while s->T1 and s->T0 supply the usual
 * source and destination values for @op.
 */
static void prepare_update3_cc(X86DecodedInsn *decode, DisasContext *s, CCOp op, TCGv reg)
{
    decode->cc_src2 = reg;
    decode->cc_src = s->T1;
    decode->cc_dst = s->T0;
    decode->cc_op = op;
}
|
||||
|
||||
static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
|
||||
{
|
||||
MemOp ot = decode->op[0].ot;
|
||||
@ -1040,6 +1054,37 @@ static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod
|
||||
VSIB_AVX(VPGATHERD, vpgatherd)
|
||||
VSIB_AVX(VPGATHERQ, vpgatherq)
|
||||
|
||||
/* AAA: ASCII adjust AL after addition (invalid in 64-bit mode). */
static void gen_AAA(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    /* Flush deferred flags: the helper reads and rewrites EFLAGS. */
    gen_update_cc_op(s);
    gen_helper_aaa(tcg_env);
    /* All flags were fully computed by the helper. */
    set_cc_op(s, CC_OP_EFLAGS);
}
|
||||
|
||||
/* AAS: ASCII adjust AL after subtraction (invalid in 64-bit mode). */
static void gen_AAS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    /* Flush deferred flags: the helper reads and rewrites EFLAGS. */
    gen_update_cc_op(s);
    gen_helper_aas(tcg_env);
    /* All flags were fully computed by the helper. */
    set_cc_op(s, CC_OP_EFLAGS);
}
|
||||
|
||||
/* ADC: add with carry-in; flags deferred via CC_OP_ADC* with c_in in cc_src2. */
static void gen_ADC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;
    TCGv c_in = tcg_temp_new();

    gen_compute_eflags_c(s, c_in);
    if (s->prefix & PREFIX_LOCK) {
        /*
         * Fold the carry into the addend first so a single atomic
         * add against the memory operand suffices.
         */
        tcg_gen_add_tl(s->T0, c_in, s->T1);
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
        tcg_gen_add_tl(s->T0, s->T0, c_in);
    }
    /* c_in is still needed to reconstruct the flags later. */
    prepare_update3_cc(decode, s, CC_OP_ADCB + ot, c_in);
}
|
||||
|
||||
/* ADCX/ADOX do not have memory operands and can use set_cc_op. */
|
||||
static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op)
|
||||
{
|
||||
@ -1093,11 +1138,37 @@ static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADCX);
|
||||
}
|
||||
|
||||
/* ADD: op0 <- op0 + op1; flags deferred via CC_OP_ADD*. */
static void gen_ADD(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        /* LOCK prefix: perform the add atomically on the memory operand. */
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
    }
    prepare_update2_cc(decode, s, CC_OP_ADDB + ot);
}
|
||||
|
||||
/* ADOX: unsigned add carried through OF; shares gen_ADCOX with ADCX. */
static void gen_ADOX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADOX);
}
|
||||
|
||||
/* AND: bitwise and; logic ops only need the result for flags (update1). */
static void gen_AND(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        /* LOCK prefix: perform the and atomically on the memory operand. */
        tcg_gen_atomic_and_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_and_tl(s->T0, s->T0, s->T1);
    }
    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
}
|
||||
|
||||
static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
MemOp ot = decode->op[0].ot;
|
||||
@ -1331,6 +1402,34 @@ static void gen_CVTTPx2PI(DisasContext *s, CPUX86State *env, X86DecodedInsn *dec
|
||||
}
|
||||
}
|
||||
|
||||
/* DAA: decimal adjust AL after addition (invalid in 64-bit mode). */
static void gen_DAA(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    /* Flush deferred flags: the helper reads and rewrites EFLAGS. */
    gen_update_cc_op(s);
    gen_helper_daa(tcg_env);
    /* All flags were fully computed by the helper. */
    set_cc_op(s, CC_OP_EFLAGS);
}
|
||||
|
||||
/* DAS: decimal adjust AL after subtraction (invalid in 64-bit mode). */
static void gen_DAS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    /* Flush deferred flags: the helper reads and rewrites EFLAGS. */
    gen_update_cc_op(s);
    gen_helper_das(tcg_env);
    /* All flags were fully computed by the helper. */
    set_cc_op(s, CC_OP_EFLAGS);
}
|
||||
|
||||
/* DEC: implemented as add of -1; CF is preserved (incdec flag handling). */
static void gen_DEC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    /* Materialize -1 as the addend so the LOCK path can reuse atomic add. */
    tcg_gen_movi_tl(s->T1, -1);
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
    }
    /* Note: this overwrites s->T1 with the saved carry. */
    prepare_update_cc_incdec(decode, s, CC_OP_DECB + ot);
}
|
||||
|
||||
static void gen_EMMS(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
gen_helper_emms(tcg_env);
|
||||
@ -1349,6 +1448,20 @@ static void gen_EXTRQ_r(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod
|
||||
gen_helper_extrq_r(tcg_env, OP_PTR0, OP_PTR2);
|
||||
}
|
||||
|
||||
/* INC: implemented as add of 1; CF is preserved (incdec flag handling). */
static void gen_INC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    /* Materialize 1 as the addend so the LOCK path can reuse atomic add. */
    tcg_gen_movi_tl(s->T1, 1);
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_add_tl(s->T0, s->T0, s->T1);
    }
    /* Note: this overwrites s->T1 with the saved carry. */
    prepare_update_cc_incdec(decode, s, CC_OP_INCB + ot);
}
|
||||
|
||||
static void gen_INSERTQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
TCGv_i32 length = tcg_constant_i32(decode->immediate & 63);
|
||||
@ -1501,6 +1614,19 @@ static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
}
|
||||
}
|
||||
|
||||
/* OR: bitwise or; logic ops only need the result for flags (update1). */
static void gen_OR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        /* LOCK prefix: perform the or atomically on the memory operand. */
        tcg_gen_atomic_or_fetch_tl(s->T0, s->A0, s->T1,
                                    s->mem_index, ot | MO_LE);
    } else {
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
    }
    prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
}
|
||||
|
||||
static void gen_PALIGNR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
|
||||
@ -1744,6 +1870,18 @@ static void gen_PMOVMSKB(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * POP: read the stack top into s->T0, then adjust the stack pointer.
 * A memory destination is stored here (so faults happen before the
 * SP update) and the generic writeback is suppressed via X86_OP_SKIP.
 */
static void gen_POP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = gen_pop_T0(s);
    if (decode->op[0].has_ea) {
        /* NOTE: order is important for MMU exceptions */
        gen_op_st_v(s, ot, s->T0, s->A0);
        decode->op[0].unit = X86_OP_SKIP;
    }
    /* NOTE: writing back registers after update is important for pop %sp */
    gen_pop_update(s, ot);
}
|
||||
|
||||
static void gen_PSHUFW(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
TCGv_i32 imm = tcg_constant8u_i32(decode->immediate);
|
||||
@ -1890,6 +2028,11 @@ static void gen_PSLLDQ_i(DisasContext *s, CPUX86State *env, X86DecodedInsn *deco
|
||||
}
|
||||
}
|
||||
|
||||
/* PUSH: push the source operand, which decoding placed in s->T1. */
static void gen_PUSH(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_push_v(s, s->T1);
}
|
||||
|
||||
static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
MemOp ot = decode->op[0].ot;
|
||||
@ -1924,6 +2067,28 @@ static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
tcg_gen_sar_tl(s->T0, s->T0, s->T1);
|
||||
}
|
||||
|
||||
/*
 * SBB: subtract with borrow; flags deferred via CC_OP_SBB* with the
 * carry-in kept in cc_src2.
 * NOTE(review): uses op[0].ot where ADC uses op[1].ot — the sizes
 * coincide for all SBB encodings in the table, but confirm.
 */
static void gen_SBB(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv c_in = tcg_temp_new();

    gen_compute_eflags_c(s, c_in);
    if (s->prefix & PREFIX_LOCK) {
        /* Atomically add -(src + carry) to the memory destination. */
        tcg_gen_add_tl(s->T0, s->T1, c_in);
        tcg_gen_neg_tl(s->T0, s->T0);
        tcg_gen_atomic_add_fetch_tl(s->T0, s->A0, s->T0,
                                    s->mem_index, ot | MO_LE);
    } else {
        /*
         * TODO: SBB reg, reg could use gen_prepare_eflags_c followed by
         * negsetcond, and CC_OP_SUBB as the cc_op.
         */
        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
        tcg_gen_sub_tl(s->T0, s->T0, c_in);
    }
    /* c_in is still needed to reconstruct the flags later. */
    prepare_update3_cc(decode, s, CC_OP_SBBB + ot, c_in);
}
|
||||
|
||||
static void gen_SHA1NEXTE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
gen_helper_sha1nexte(OP_PTR0, OP_PTR1, OP_PTR2);
|
||||
@ -2011,6 +2176,22 @@ static void gen_STMXCSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decod
|
||||
tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
|
||||
}
|
||||
|
||||
/*
 * SUB (also CMP via read-only operands): s->cc_srcT records the
 * original destination value for the deferred CC_OP_SUB* flags.
 */
static void gen_SUB(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (s->prefix & PREFIX_LOCK) {
        /*
         * Atomic fetch-add of -src returns the old destination value
         * into cc_srcT; the result is then recomputed from it.
         */
        tcg_gen_neg_tl(s->T0, s->T1);
        tcg_gen_atomic_fetch_add_tl(s->cc_srcT, s->A0, s->T0,
                                    s->mem_index, ot | MO_LE);
        tcg_gen_sub_tl(s->T0, s->cc_srcT, s->T1);
    } else {
        /* Save the destination before it is overwritten. */
        tcg_gen_mov_tl(s->cc_srcT, s->T0);
        tcg_gen_sub_tl(s->T0, s->T0, s->T1);
    }
    prepare_update2_cc(decode, s, CC_OP_SUBB + ot);
}
|
||||
|
||||
static void gen_VAESIMC(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
|
||||
{
|
||||
assert(!s->vex_l);
|
||||
@ -2490,3 +2671,24 @@ static void gen_VZEROUPPER(DisasContext *s, CPUX86State *env, X86DecodedInsn *de
|
||||
tcg_gen_gvec_dup_imm(MO_64, offset, 16, 16, 0);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * XOR: bitwise exclusive or.  "xor reg, reg" is the canonical
 * zeroing idiom, so it is special-cased to a constant move with
 * CC_OP_CLR (all flags cleared/set per the zero result).
 */
static void gen_XOR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    /* special case XOR reg, reg */
    if (decode->op[1].unit == X86_OP_INT &&
        decode->op[2].unit == X86_OP_INT &&
        decode->op[1].n == decode->op[2].n) {
        tcg_gen_movi_tl(s->T0, 0);
        decode->cc_op = CC_OP_CLR;
    } else {
        MemOp ot = decode->op[1].ot;

        if (s->prefix & PREFIX_LOCK) {
            /* LOCK prefix: perform the xor atomically on the memory operand. */
            tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T1,
                                        s->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s->T0, s->T0, s->T1);
        }
        prepare_update1_cc(decode, s, CC_OP_LOGICB + ot);
    }
}
|
||||
|
@ -3146,7 +3146,7 @@ static bool disas_insn(DisasContext *s, CPUState *cpu)
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
use_new &= b <= limit;
|
||||
#endif
|
||||
if (use_new && 0) {
|
||||
if (use_new && b <= 0x5f) {
|
||||
disas_insn_new(s, cpu, b);
|
||||
return true;
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user