tree-ssa-math-opts: Fix up convert_{mult,plusminus}_to_widen [PR113759]

On the following testcase we emit invalid stmt:
error: type mismatch in ‘widen_mult_plus_expr’
    6 | foo (int c, int b)
      | ^~~
unsigned long
int
unsigned int
unsigned long
_31 = WIDEN_MULT_PLUS_EXPR <b_5(D), 2, _30>;
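
The four types printed by the verifier are the lhs type followed by the
three operand types.  Roughly (paraphrasing the WIDEN_MULT_PLUS_EXPR case
in verify_gimple_assign_ternary rather than quoting it), the check that
trips is:

  if (!useless_type_conversion_p (lhs_type, rhs3_type)
      || 2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)
      || !types_compatible_p (rhs1_type, rhs2_type))
    error ("type mismatch in %qs", code_name);

Here rhs1 (b_5(D)) still has type int while rhs2 (the constant 2) has
already been converted to unsigned int, so types_compatible_p fails.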

The recent PR113560 r14-8680 changes tweaked convert_mult_to_widen, but
didn't adjust convert_plusminus_to_widen for the TREE_TYPE (rhsN) != typeN
cases; looking at it, though, that code was already quite weird before
that change.

Earlier in those functions the code determines actual_precision and
from_unsignedN and wants to use that precision and signedness for the
operands.  It used build_and_insert_cast for that (which emits a cast
stmt, even for INTEGER_CSTs) and later on fold_converted INTEGER_CST
arguments to typeN (it is unclear to me why, because it seems to have
assumed that TREE_TYPE (rhsN) is typeN; for the actual_precision or
from_unsignedN mismatch cases that would be wrong, except that
build_and_insert_cast had already forced an SSA_NAME, so the INTEGER_CST
handling doesn't trigger anymore).
Now, since r14-8680, it is possible that rhsN has some type other than
typeN and we again want to cast.
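
For reference, the pre-patch handling of the first multiplication operand
in convert_plusminus_to_widen (condensed; the second operand is handled
the same way) was:

  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    /* Emits a cast stmt even when mult_rhs1 is an INTEGER_CST.  */
    mult_rhs1 = build_and_insert_cast (gsi, loc,
                                       build_nonstandard_integer_type
                                       (actual_precision, from_unsigned1),
                                       mult_rhs1);
  ...
  /* Handle constants.  Assumes TREE_TYPE (mult_rhs1) is type1; since
     r14-8680 an SSA_NAME mult_rhs1 can have a different type, and then
     neither spot converts it.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);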

The following patch changes this so that when actual_precision and/or
from_unsignedN differ from the properties of typeN, we actually update
typeN and then use it as the type to convert the arguments to when that
conversion isn't useless: for INTEGER_CSTs by just fold_converting,
otherwise by using build_and_insert_cast.  The useless_type_conversion_p
test ensures we don't convert unless necessary.  Doing it this way also
effectively applies the important part of the r14-8680
convert_mult_to_widen changes to convert_plusminus_to_widen.
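
The per-operand logic now used in both functions boils down to the
following sketch (the widen_operand_to helper name is made up for
illustration; the patch open-codes this for each operand rather than
introducing a helper):

static tree
widen_operand_to (gimple_stmt_iterator *gsi, location_t loc, tree type,
                  tree op, unsigned int actual_precision, bool from_unsigned)
{
  /* Make TYPE match the precision and signedness we want for the operand.  */
  if (actual_precision != TYPE_PRECISION (type)
      || from_unsigned != TYPE_UNSIGNED (type))
    type = build_nonstandard_integer_type (actual_precision, from_unsigned);
  /* Convert OP only if the conversion isn't useless: fold INTEGER_CSTs
     directly, emit a cast stmt for SSA_NAMEs.  */
  if (!useless_type_conversion_p (type, TREE_TYPE (op)))
    {
      if (TREE_CODE (op) == INTEGER_CST)
        op = fold_convert (type, op);
      else
        op = build_and_insert_cast (gsi, loc, type, op);
    }
  return op;
}

Folding INTEGER_CSTs avoids emitting dead cast stmts for constants, while
SSA_NAME operands get an explicit conversion stmt inserted before the
widened operation.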

2024-02-06  Jakub Jelinek  <jakub@redhat.com>

gcc/ChangeLog:

	PR tree-optimization/113759
	* tree-ssa-math-opts.cc (convert_mult_to_widen): If actual_precision
	or from_unsignedN differs from properties of typeN, update typeN
	to build_nonstandard_integer_type.  If TREE_TYPE (rhsN) is not
	uselessly convertible to typeN, convert it using fold_convert or
	build_and_insert_cast depending on if rhsN is INTEGER_CST or not.
	(convert_plusminus_to_widen): Likewise.

gcc/testsuite/ChangeLog:

	* gcc.c-torture/compile/pr113759.c: New test.

--- /dev/null
+++ b/gcc/testsuite/gcc.c-torture/compile/pr113759.c
@@ -0,0 +1,20 @@
+/* PR tree-optimization/113759 */
+
+extern short t[];
+
+int
+foo (int c, int b)
+{
+  if (b < 0)
+    __builtin_unreachable ();
+  if (c <= 0)
+    __builtin_unreachable ();
+  int d;
+  for (; c >= 0; c--)
+    {
+      int a = b + c;
+      d = t[a];
+      t[a] = 0;
+    }
+  return d;
+}

--- a/gcc/tree-ssa-math-opts.cc
+++ b/gcc/tree-ssa-math-opts.cc
@@ -2865,25 +2865,25 @@ convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
   if (2 * actual_precision > TYPE_PRECISION (type))
     return false;
   if (actual_precision != TYPE_PRECISION (type1)
-      || from_unsigned1 != TYPE_UNSIGNED (type1)
-      || (TREE_TYPE (rhs1) != type1
-          && TREE_CODE (rhs1) != INTEGER_CST))
-    rhs1 = build_and_insert_cast (gsi, loc,
-                                  build_nonstandard_integer_type
-                                  (actual_precision, from_unsigned1), rhs1);
+      || from_unsigned1 != TYPE_UNSIGNED (type1))
+    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+  if (!useless_type_conversion_p (type1, TREE_TYPE (rhs1)))
+    {
+      if (TREE_CODE (rhs1) == INTEGER_CST)
+        rhs1 = fold_convert (type1, rhs1);
+      else
+        rhs1 = build_and_insert_cast (gsi, loc, type1, rhs1);
+    }
   if (actual_precision != TYPE_PRECISION (type2)
-      || from_unsigned2 != TYPE_UNSIGNED (type2)
-      || (TREE_TYPE (rhs2) != type2
-          && TREE_CODE (rhs2) != INTEGER_CST))
-    rhs2 = build_and_insert_cast (gsi, loc,
-                                  build_nonstandard_integer_type
-                                  (actual_precision, from_unsigned2), rhs2);
-
-  /* Handle constants.  */
-  if (TREE_CODE (rhs1) == INTEGER_CST)
-    rhs1 = fold_convert (type1, rhs1);
-  if (TREE_CODE (rhs2) == INTEGER_CST)
-    rhs2 = fold_convert (type2, rhs2);
+      || from_unsigned2 != TYPE_UNSIGNED (type2))
+    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+  if (!useless_type_conversion_p (type2, TREE_TYPE (rhs2)))
+    {
+      if (TREE_CODE (rhs2) == INTEGER_CST)
+        rhs2 = fold_convert (type2, rhs2);
+      else
+        rhs2 = build_and_insert_cast (gsi, loc, type2, rhs2);
+    }
 
   gimple_assign_set_rhs1 (stmt, rhs1);
   gimple_assign_set_rhs2 (stmt, rhs2);
@@ -3086,26 +3086,28 @@ convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
   actual_precision = GET_MODE_PRECISION (actual_mode);
   if (actual_precision != TYPE_PRECISION (type1)
       || from_unsigned1 != TYPE_UNSIGNED (type1))
-    mult_rhs1 = build_and_insert_cast (gsi, loc,
-                                       build_nonstandard_integer_type
-                                       (actual_precision, from_unsigned1),
-                                       mult_rhs1);
+    type1 = build_nonstandard_integer_type (actual_precision, from_unsigned1);
+  if (!useless_type_conversion_p (type1, TREE_TYPE (mult_rhs1)))
+    {
+      if (TREE_CODE (mult_rhs1) == INTEGER_CST)
+        mult_rhs1 = fold_convert (type1, mult_rhs1);
+      else
+        mult_rhs1 = build_and_insert_cast (gsi, loc, type1, mult_rhs1);
+    }
   if (actual_precision != TYPE_PRECISION (type2)
       || from_unsigned2 != TYPE_UNSIGNED (type2))
-    mult_rhs2 = build_and_insert_cast (gsi, loc,
-                                       build_nonstandard_integer_type
-                                       (actual_precision, from_unsigned2),
-                                       mult_rhs2);
+    type2 = build_nonstandard_integer_type (actual_precision, from_unsigned2);
+  if (!useless_type_conversion_p (type2, TREE_TYPE (mult_rhs2)))
+    {
+      if (TREE_CODE (mult_rhs2) == INTEGER_CST)
+        mult_rhs2 = fold_convert (type2, mult_rhs2);
+      else
+        mult_rhs2 = build_and_insert_cast (gsi, loc, type2, mult_rhs2);
+    }
 
   if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
     add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
 
-  /* Handle constants.  */
-  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
-    mult_rhs1 = fold_convert (type1, mult_rhs1);
-  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
-    mult_rhs2 = fold_convert (type2, mult_rhs2);
-
   gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
                                   add_rhs);
   update_stmt (gsi_stmt (*gsi));