tcg/optimize: Split out fold_to_not
Split out the conditional conversion from a more complex logical
operation to a simple NOT.  Create a couple more helpers to make
this easy for the outer-most logical operations.

Reviewed-by: Luis Pires <luis.pires@eldorado.org.br>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
parent 67f84c9621
commit 0e0a32bacb

tcg/optimize.c: 158 changed lines
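Not part of the commit: before the diff, a minimal standalone C check of the bitwise identities the new folds rely on. With a constant -1 as the first operand, andc reduces to NOT; with 0, orc does; with -1 as the second operand, xor and nand do; with 0 as the second operand, eqv and nor do. This assumes nothing beyond ordinary uint64_t semantics.

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t x = 0x0123456789abcdefull;
        uint64_t m1 = ~(uint64_t)0;            /* all-ones, i.e. -1 */

        /* Constant in the first operand (fold_ix_to_not). */
        assert((m1 & ~x) == ~x);               /* andc(-1, x) == not(x) */
        assert((0 | ~x) == ~x);                /* orc(0, x)   == not(x) */

        /* Constant in the second operand (fold_xi_to_not). */
        assert((x ^ m1) == ~x);                /* xor(x, -1)  == not(x) */
        assert(~(x ^ 0) == ~x);                /* eqv(x, 0)   == not(x) */
        assert(~(x & m1) == ~x);               /* nand(x, -1) == not(x) */
        assert(~(x | (uint64_t)0) == ~x);      /* nor(x, 0)   == not(x) */

        return 0;
    }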
@@ -694,6 +694,52 @@ static bool fold_const2(OptContext *ctx, TCGOp *op)
     return false;
 }
 
+/*
+ * Convert @op to NOT, if NOT is supported by the host.
+ * Return true if the conversion is successful, which will still
+ * indicate that the processing is complete.
+ */
+static bool fold_not(OptContext *ctx, TCGOp *op);
+static bool fold_to_not(OptContext *ctx, TCGOp *op, int idx)
+{
+    TCGOpcode not_op;
+    bool have_not;
+
+    switch (ctx->type) {
+    case TCG_TYPE_I32:
+        not_op = INDEX_op_not_i32;
+        have_not = TCG_TARGET_HAS_not_i32;
+        break;
+    case TCG_TYPE_I64:
+        not_op = INDEX_op_not_i64;
+        have_not = TCG_TARGET_HAS_not_i64;
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        not_op = INDEX_op_not_vec;
+        have_not = TCG_TARGET_HAS_not_vec;
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    if (have_not) {
+        op->opc = not_op;
+        op->args[1] = op->args[idx];
+        return fold_not(ctx, op);
+    }
+    return false;
+}
+
+/* If the binary operation has first argument @i, fold to NOT. */
+static bool fold_ix_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[1]) && arg_info(op->args[1])->val == i) {
+        return fold_to_not(ctx, op, 2);
+    }
+    return false;
+}
+
 /* If the binary operation has second argument @i, fold to @i. */
 static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -703,6 +749,15 @@ static bool fold_xi_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
     return false;
 }
 
+/* If the binary operation has second argument @i, fold to NOT. */
+static bool fold_xi_to_not(OptContext *ctx, TCGOp *op, uint64_t i)
+{
+    if (arg_is_const(op->args[2]) && arg_info(op->args[2])->val == i) {
+        return fold_to_not(ctx, op, 1);
+    }
+    return false;
+}
+
 /* If the binary operation has both arguments equal, fold to @i. */
 static bool fold_xx_to_i(OptContext *ctx, TCGOp *op, uint64_t i)
 {
@@ -781,7 +836,8 @@ static bool fold_and(OptContext *ctx, TCGOp *op)
 static bool fold_andc(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_ix_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -987,7 +1043,11 @@ static bool fold_dup2(OptContext *ctx, TCGOp *op)
 
 static bool fold_eqv(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_extract(OptContext *ctx, TCGOp *op)
@@ -1134,7 +1194,11 @@ static bool fold_mulu2_i32(OptContext *ctx, TCGOp *op)
 
 static bool fold_nand(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, -1)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_neg(OptContext *ctx, TCGOp *op)
@@ -1144,12 +1208,22 @@ static bool fold_neg(OptContext *ctx, TCGOp *op)
 
 static bool fold_nor(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_xi_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_not(OptContext *ctx, TCGOp *op)
 {
-    return fold_const1(ctx, op);
+    if (fold_const1(ctx, op)) {
+        return true;
+    }
+
+    /* Because of fold_to_not, we want to always return true, via finish. */
+    finish_folding(ctx, op);
+    return true;
 }
 
 static bool fold_or(OptContext *ctx, TCGOp *op)
@@ -1163,7 +1237,11 @@ static bool fold_or(OptContext *ctx, TCGOp *op)
 
 static bool fold_orc(OptContext *ctx, TCGOp *op)
 {
-    return fold_const2(ctx, op);
+    if (fold_const2(ctx, op) ||
+        fold_ix_to_not(ctx, op, 0)) {
+        return true;
+    }
+    return false;
 }
 
 static bool fold_qemu_ld(OptContext *ctx, TCGOp *op)
@@ -1299,7 +1377,8 @@ static bool fold_sub2_i32(OptContext *ctx, TCGOp *op)
 static bool fold_xor(OptContext *ctx, TCGOp *op)
 {
     if (fold_const2(ctx, op) ||
-        fold_xx_to_i(ctx, op, 0)) {
+        fold_xx_to_i(ctx, op, 0) ||
+        fold_xi_to_not(ctx, op, -1)) {
         return true;
     }
     return false;
@@ -1458,71 +1537,6 @@ void tcg_optimize(TCGContext *s)
                 }
             }
             break;
-        CASE_OP_32_64_VEC(xor):
-        CASE_OP_32_64(nand):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == -1) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64(nor):
-            if (!arg_is_const(op->args[1])
-                && arg_is_const(op->args[2])
-                && arg_info(op->args[2])->val == 0) {
-                i = 1;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(andc):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == -1) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        CASE_OP_32_64_VEC(orc):
-        CASE_OP_32_64(eqv):
-            if (!arg_is_const(op->args[2])
-                && arg_is_const(op->args[1])
-                && arg_info(op->args[1])->val == 0) {
-                i = 2;
-                goto try_not;
-            }
-            break;
-        try_not:
-            {
-                TCGOpcode not_op;
-                bool have_not;
-
-                switch (ctx.type) {
-                case TCG_TYPE_I32:
-                    not_op = INDEX_op_not_i32;
-                    have_not = TCG_TARGET_HAS_not_i32;
-                    break;
-                case TCG_TYPE_I64:
-                    not_op = INDEX_op_not_i64;
-                    have_not = TCG_TARGET_HAS_not_i64;
-                    break;
-                case TCG_TYPE_V64:
-                case TCG_TYPE_V128:
-                case TCG_TYPE_V256:
-                    not_op = INDEX_op_not_vec;
-                    have_not = TCG_TARGET_HAS_not_vec;
-                    break;
-                default:
-                    g_assert_not_reached();
-                }
-                if (!have_not) {
-                    break;
-                }
-                op->opc = not_op;
-                reset_temp(op->args[0]);
-                op->args[1] = op->args[i];
-                continue;
-            }
         default:
             break;
         }
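Also not part of the commit: a standalone sketch of what fold_to_not does mechanically. It is ordinary C with hypothetical toy types (ToyOp, OP_ANDC, OP_NOT are illustrative stand-ins, not TCG definitions): the op is rewritten in place to the host NOT opcode, the surviving operand @idx is moved into args[1], and the rewritten op is handed to the NOT folder, which is why fold_not must always report the op as handled.

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-ins; the real code uses TCGOp, INDEX_op_not_*, etc. */
    enum { OP_ANDC, OP_NOT };
    typedef struct {
        int opc;
        int args[3];    /* args[0] = dest, args[1..2] = sources */
    } ToyOp;

    static bool toy_fold_not(ToyOp *op)
    {
        /* The real fold_not also tries constant folding; the key point
         * is that it always reports the op as fully processed. */
        (void)op;
        return true;
    }

    /* Rewrite @op into NOT of args[idx], assuming the host has a NOT op. */
    static bool toy_fold_to_not(ToyOp *op, int idx)
    {
        bool have_not = true;    /* stands in for TCG_TARGET_HAS_not_* */

        if (have_not) {
            op->opc = OP_NOT;
            op->args[1] = op->args[idx];
            return toy_fold_not(op);
        }
        return false;
    }

    int main(void)
    {
        /* andc t0, c(-1), t5 is equivalent to not t0, t5,
         * so the surviving operand is args[2]. */
        ToyOp op = { OP_ANDC, { 0, -1, 5 } };

        if (toy_fold_to_not(&op, 2)) {
            printf("rewritten: opc=%s dest=t%d src=t%d\n",
                   op.opc == OP_NOT ? "not" : "?", op.args[0], op.args[1]);
        }
        return 0;
    }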