diff --git a/gcc/generic-match-head.c b/gcc/generic-match-head.c
index 2454baac9d44..fdb528d96863 100644
--- a/gcc/generic-match-head.c
+++ b/gcc/generic-match-head.c
@@ -80,6 +80,16 @@ canonicalize_math_after_vectorization_p ()
   return false;
 }
 
+/* Return true if we can still perform transformations that may introduce
+   vector operations that are not supported by the target.  Vector lowering
+   normally handles those, but after that pass, it becomes unsafe.  */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+  return true;
+}
+
 /* Return true if successive divisions can be optimized.
    Defer to GIMPLE opts.  */
 
diff --git a/gcc/gimple-match-head.c b/gcc/gimple-match-head.c
index 9b3e7298d87c..4a65be703b92 100644
--- a/gcc/gimple-match-head.c
+++ b/gcc/gimple-match-head.c
@@ -1158,6 +1158,16 @@ canonicalize_math_after_vectorization_p ()
   return !cfun || (cfun->curr_properties & PROP_gimple_lvec) != 0;
 }
 
+/* Return true if we can still perform transformations that may introduce
+   vector operations that are not supported by the target.  Vector lowering
+   normally handles those, but after that pass, it becomes unsafe.  */
+
+static inline bool
+optimize_vectors_before_lowering_p ()
+{
+  return !cfun || (cfun->curr_properties & PROP_gimple_lvec) == 0;
+}
+
 /* Return true if pow(cst, x) should be optimized into exp(log(cst) * x).
    As a workaround for SPEC CPU2017 628.pop2_s, don't do it if arg0
    is an exact integer, arg1 = phi_res +/- cst1 and phi_res = PHI
diff --git a/gcc/match.pd b/gcc/match.pd
index d8e3927d3c75..7e5c5a6eae61 100644
--- a/gcc/match.pd
+++ b/gcc/match.pd
@@ -3461,40 +3461,42 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
    (vec_cond @0 (op! @3 @1) (op! @3 @2))))
 #endif
 
-/* (v ? w : 0) ? a : b is just (v & w) ? a : b  */
+/* (v ? w : 0) ? a : b is just (v & w) ? a : b
+   Currently disabled after pass lvec because ARM understands
+   VEC_COND_EXPR but not a plain v==w fed to BIT_IOR_EXPR.  */
 (simplify
  (vec_cond (vec_cond:s @0 @3 integer_zerop) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_and @0 @3) @1 @2)))
 (simplify
  (vec_cond (vec_cond:s @0 integer_all_onesp @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_ior @0 @3) @1 @2)))
 (simplify
  (vec_cond (vec_cond:s @0 integer_zerop @3) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_ior @0 (bit_not @3)) @2 @1)))
 (simplify
  (vec_cond (vec_cond:s @0 @3 integer_all_onesp) @1 @2)
- (if (types_match (@0, @3))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @3))
   (vec_cond (bit_and @0 (bit_not @3)) @2 @1)))
 
 /* c1 ? c2 ? a : b : b  -->  (c1 & c2) ? a : b  */
 (simplify
  (vec_cond @0 (vec_cond:s @1 @2 @3) @3)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
   (vec_cond (bit_and @0 @1) @2 @3)))
 (simplify
  (vec_cond @0 @2 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
   (vec_cond (bit_ior @0 @1) @2 @3)))
 (simplify
  (vec_cond @0 (vec_cond:s @1 @2 @3) @2)
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
   (vec_cond (bit_ior (bit_not @0) @1) @2 @3)))
 (simplify
  (vec_cond @0 @3 (vec_cond:s @1 @2 @3))
- (if (types_match (@0, @1))
+ (if (optimize_vectors_before_lowering_p () && types_match (@0, @1))
   (vec_cond (bit_and (bit_not @0) @1) @2 @3)))
 
 /* Simplification moved from fold_cond_expr_with_comparison.  It may also
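For context (not part of the patch): the first match.pd rule above merges a nested
vector selection into a single selection on the AND of the two masks.  A rough
source-level sketch of the shape it targets, using GCC's vector extensions (the ?:
form on vector operands is the C++ flavour of the extension), could look like the
snippet below; the typedef and function name are made up for illustration, and
whether the folder actually fires depends on how the front end lowers the ?:.

/* Illustrative only; not from the patch.  The nested selection is the
   source-level shape of (v ? w : 0) ? a : b, which the rule rewrites to
   (v & w) ? a : b, but only while optimize_vectors_before_lowering_p ()
   still returns true.  */
typedef int v4si __attribute__ ((vector_size (16)));

v4si
select_nested (v4si v, v4si w, v4si a, v4si b)
{
  v4si zero = { 0, 0, 0, 0 };
  /* (v ? w : 0) ? a : b  -->  (v & w) ? a : b  */
  return (v ? w : zero) ? a : b;
}

The two helpers encode the rationale stated in the comments: the generic variant can
always return true because vector lowering has not run yet, while the GIMPLE variant
returns false once PROP_gimple_lvec is set, since after that pass nothing would clean
up a BIT_AND_EXPR/BIT_IOR_EXPR of vector masks that the target (e.g. ARM) cannot expand.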