Fix handling of __builtin_expect_with_probability and improve first-match heuristics

While looking into the std::vector _M_realloc_insert codegen I noticed that
the call to __throw_bad_alloc is predicted with 10% probability.  This is
because the conditional guarding it has __builtin_expect (cond, 0) on it,
which incorrectly takes precedence over the more reliable heuristic
predicting that a call to a cold noreturn function is likely not going to
happen.
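
The pattern boils down to something like this (a simplified hypothetical
sketch, not the actual libstdc++ code):

	extern void __throw_bad_alloc (void) __attribute__ ((cold, noreturn));

	void *
	reserve (unsigned long n, unsigned long max_n)
	{
	  /* The expect hint pinned this branch to a fixed 90%/10% split,
	     overriding the much stronger prediction that a call to a cold
	     noreturn function is almost never executed.  */
	  if (__builtin_expect (n > max_n, 0))
	    __throw_bad_alloc ();
	  return __builtin_malloc (n);
	}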

So I reordered the predictors so that __builtin_expect_with_probability comes
first after the predictors that never make a mistake (so the user can use it
to always specify the outcome by hand).  I also downgraded the malloc
predictor, since I do think user-defined malloc functions and new operators
may behave in funny ways, and moved the usual __builtin_expect after the
noreturn cold predictor.
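
The practical effect is that an explicit probability hint now wins over
everything except the never-wrong predictors; for instance (hypothetical
example):

	extern void use (void *);

	void
	f (void *p)
	{
	  /* The hand-specified probability of 1.0 is matched before the
	     guessed heuristics, so it fully determines the prediction.  */
	  if (__builtin_expect_with_probability (p != 0, 1, 1.0))
	    use (p);
	}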

This triggered a latent bug in expr_expected_value_1 where

	  if (*predictor < predictor2)
	    *predictor = predictor2;

should be:

	  if (predictor2 < *predictor)
	    *predictor = predictor2;

which eventually triggered an ICE when combining heuristics.  This made me
notice that we can do slightly better while combining expected values in the
case where only one of the operands (such as in a*b when we expect a==0)
can determine the overall result.
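
The new testcase hunks below exercise exactly this: since m is expected to
be 0 with probability 0.6 and 0 * a == 0 for any a, the whole condition can
be predicted even though a is unknown:

	extern int global;
	extern short m;

	void
	g (int a)
	{
	  /* Predicted false with probability 0.6, i.e. the taken edge gets
	     the 40.00% the testcase scans for.  */
	  if (a * __builtin_expect_with_probability (m, 0, 0.6f) > 0)
	    global++;
	}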

Note that the new code may pick a weaker heuristic in the case that both
values are predicted.  I am not sure if this scenario is worth the extra CPU
time: there is no correct way to combine the probabilities anyway, since we
do not know whether the predictions are independent, so I think users should
not rely on it.
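
Concretely, the combination code below multiplies the two probabilities as
if the predictions were independent:

	/* p1 and p2 are scaled by REG_BR_PROB_BASE, so e.g. hints of 0.90
	   and 0.60 combine as
	     *probability = RDIV (p1 * p2, REG_BR_PROB_BASE),
	   giving 0.90 * 0.60 = 0.54, i.e. a 54% prediction.  */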

Fixing this issue uncovered another problem.  In 2018 Martin Liska added
code predicting that malloc returns non-NULL, but instead of that it
predicts that malloc returns true (boolean 1).  This sort of works for a
testcase testing
 malloc (10) != NULL
but, for example, we will predict
 malloc (10) == malloc (10)
as true, which is not right, and such comparisons may happen in real code.
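
A sketch of the failure mode (hypothetical example, assuming the standard
malloc prototype):

	#include <stdlib.h>

	extern int global;

	void
	h (void)
	{
	  /* Both calls get the expected value (size_t)1, so the comparison
	     folds to 1 == 1 and is predicted true, although two successful
	     allocations never compare equal.  */
	  if (malloc (10) == malloc (10))
	    global++;
	}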

I think the proper way is to update expr_expected_value_1 to work with value
ranges, but that needs greater surgery, so I decided to postpone this and
only add a FIXME and file PR110499.

gcc/ChangeLog:

	PR middle-end/109849
	* predict.cc (estimate_bb_frequencies): Turn into a static function.
	(expr_expected_value_1): Fix handling of binary expressions with
	predicted values.
	* predict.def (PRED_MALLOC_NONNULL): Move later in the priority queue.
	(PRED_BUILTIN_EXPECT_WITH_PROBABILITY): Move near the top of the
	priority queue.
	* predict.h (estimate_bb_frequencies): No longer declare it.

gcc/testsuite/ChangeLog:

	PR middle-end/109849
	* gcc.dg/predict-18.c: Improve testcase.

--- a/gcc/predict.cc
+++ b/gcc/predict.cc

@@ -89,6 +89,7 @@ static void predict_paths_leading_to_edge (edge, enum br_predictor,
 static bool can_predict_insn_p (const rtx_insn *);
 static HOST_WIDE_INT get_predictor_value (br_predictor, HOST_WIDE_INT);
 static void determine_unlikely_bbs ();
+static void estimate_bb_frequencies (bool force);
 
 /* Information we hold about each branch predictor.
    Filled using information from predict.def.  */
@@ -2485,7 +2486,11 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
 	{
 	  if (predictor)
 	    *predictor = PRED_MALLOC_NONNULL;
-	  return boolean_true_node;
+	  /* FIXME: This is wrong and we need to convert the logic
+	     to value ranges.  This makes predictor to assume that
+	     malloc always returns (size_t)1 which is not the same
+	     as returning non-NULL.  */
+	  return fold_convert (type, boolean_true_node);
 	}
 
       if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL)
@@ -2563,7 +2568,9 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
 	    case BUILT_IN_REALLOC:
 	      if (predictor)
 		*predictor = PRED_MALLOC_NONNULL;
-	      return boolean_true_node;
+	      /* FIXME: This is wrong and we need to convert the logic
+		 to value ranges.  */
+	      return fold_convert (type, boolean_true_node);
 	    default:
 	      break;
 	    }
@@ -2575,18 +2582,43 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
   if (get_gimple_rhs_class (code) == GIMPLE_BINARY_RHS)
     {
       tree res;
+      tree nop0 = op0;
+      tree nop1 = op1;
+      if (TREE_CODE (op0) != INTEGER_CST)
+	{
+	  /* See if expected value of op0 is good enough to determine the result.  */
+	  nop0 = expr_expected_value (op0, visited, predictor, probability);
+	  if (nop0
+	      && (res = fold_build2 (code, type, nop0, op1)) != NULL
+	      && TREE_CODE (res) == INTEGER_CST)
+	    return res;
+	  if (!nop0)
+	    nop0 = op0;
+	}
       enum br_predictor predictor2;
       HOST_WIDE_INT probability2;
-      op0 = expr_expected_value (op0, visited, predictor, probability);
-      if (!op0)
+      if (TREE_CODE (op1) != INTEGER_CST)
+	{
+	  /* See if expected value of op1 is good enough to determine the result.  */
+	  nop1 = expr_expected_value (op1, visited, &predictor2, &probability2);
+	  if (nop1
+	      && (res = fold_build2 (code, type, op0, nop1)) != NULL
+	      && TREE_CODE (res) == INTEGER_CST)
+	    {
+	      *predictor = predictor2;
+	      *probability = probability2;
+	      return res;
+	    }
+	  if (!nop1)
+	    nop1 = op1;
+	}
+      if (nop0 == op0 || nop1 == op1)
 	return NULL;
-      op1 = expr_expected_value (op1, visited, &predictor2, &probability2);
-      if (!op1)
-	return NULL;
-      res = fold_build2 (code, type, op0, op1);
+      /* Finally see if we have two known values.  */
+      res = fold_build2 (code, type, nop0, nop1);
       if (TREE_CODE (res) == INTEGER_CST
-	  && TREE_CODE (op0) == INTEGER_CST
-	  && TREE_CODE (op1) == INTEGER_CST)
+	  && TREE_CODE (nop0) == INTEGER_CST
+	  && TREE_CODE (nop1) == INTEGER_CST)
 	{
 	  /* Combine binary predictions.  */
 	  if (*probability != -1 || probability2 != -1)
@@ -2596,7 +2628,7 @@ expr_expected_value_1 (tree type, tree op0, enum tree_code code,
 	      *probability = RDIV (p1 * p2, REG_BR_PROB_BASE);
 	    }
-	  if (*predictor < predictor2)
+	  if (predictor2 < *predictor)
 	    *predictor = predictor2;
 	  return res;
@@ -3894,7 +3926,7 @@ determine_unlikely_bbs ()
    probabilities.  If FORCE is true, the frequencies are used to estimate
    the counts even when there are already non-zero profile counts.  */
-void
+static void
 estimate_bb_frequencies (bool force)
 {
   basic_block bb;

--- a/gcc/predict.def
+++ b/gcc/predict.def

@@ -51,16 +51,17 @@ DEF_PREDICTOR (PRED_NO_PREDICTION, "no prediction", PROB_ALWAYS, 0)
 DEF_PREDICTOR (PRED_UNCONDITIONAL, "unconditional jump", PROB_ALWAYS,
 	       PRED_FLAG_FIRST_MATCH)
 
-/* Return value of malloc function is almost always non-null.  */
-DEF_PREDICTOR (PRED_MALLOC_NONNULL, "malloc returned non-NULL", \
-	       PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH)
-
 /* Use number of loop iterations determined by # of iterations
    analysis to set probability.  We don't want to use Dempster-Shaffer
    theory here, as the predictions is exact.  */
 DEF_PREDICTOR (PRED_LOOP_ITERATIONS, "loop iterations", PROB_UNINITIALIZED,
 	       PRED_FLAG_FIRST_MATCH)
 
+/* Hints provided by user via __builtin_expect_with_probability.  */
+DEF_PREDICTOR (PRED_BUILTIN_EXPECT_WITH_PROBABILITY,
+	       "__builtin_expect_with_probability", PROB_UNINITIALIZED,
+	       PRED_FLAG_FIRST_MATCH)
+
 /* Assume that any given atomic operation has low contention,
    and thus the compare-and-swap operation succeeds.  */
 DEF_PREDICTOR (PRED_COMPARE_AND_SWAP, "compare and swap", PROB_VERY_LIKELY,
@@ -73,11 +74,6 @@ DEF_PREDICTOR (PRED_COMPARE_AND_SWAP, "compare and swap", PROB_VERY_LIKELY,
 DEF_PREDICTOR (PRED_BUILTIN_EXPECT, "__builtin_expect", PROB_VERY_LIKELY,
 	       PRED_FLAG_FIRST_MATCH)
 
-/* Hints provided by user via __builtin_expect_with_probability.  */
-DEF_PREDICTOR (PRED_BUILTIN_EXPECT_WITH_PROBABILITY,
-	       "__builtin_expect_with_probability", PROB_UNINITIALIZED,
-	       PRED_FLAG_FIRST_MATCH)
-
 /* Branches to hot labels are likely.  */
 DEF_PREDICTOR (PRED_HOT_LABEL, "hot label", HITRATE (90),
 	       PRED_FLAG_FIRST_MATCH)
@@ -86,6 +82,10 @@ DEF_PREDICTOR (PRED_HOT_LABEL, "hot label", HITRATE (90),
 DEF_PREDICTOR (PRED_COLD_LABEL, "cold label", HITRATE (90),
 	       PRED_FLAG_FIRST_MATCH)
 
+/* Return value of malloc function is almost always non-null.  */
+DEF_PREDICTOR (PRED_MALLOC_NONNULL, "malloc returned non-NULL", \
+	       PROB_VERY_LIKELY, PRED_FLAG_FIRST_MATCH)
+
 /* Use number of loop iterations guessed by the contents of the loop.  */
 DEF_PREDICTOR (PRED_LOOP_ITERATIONS_GUESSED, "guessed loop iterations",
 	       PROB_UNINITIALIZED, PRED_FLAG_FIRST_MATCH)

--- a/gcc/predict.h
+++ b/gcc/predict.h

@@ -93,7 +93,6 @@ extern void tree_estimate_probability (bool);
 extern void handle_missing_profiles (void);
 extern bool update_max_bb_count (void);
 extern bool expensive_function_p (int);
-extern void estimate_bb_frequencies (bool);
 extern void compute_function_frequency (void);
 extern tree build_predict_expr (enum br_predictor, enum prediction);
 extern const char *predictor_name (enum br_predictor);

--- a/gcc/testsuite/gcc.dg/predict-18.c
+++ b/gcc/testsuite/gcc.dg/predict-18.c

@@ -8,6 +8,8 @@ int x;
 short v = 0;
 short expected = 0;
 short max = ~0;
+short m = 0;
+short n = 0;
 
 #define STRONG 0
 
@@ -23,9 +25,17 @@ void foo (int a, int b)
   if (__builtin_expect_with_probability (a < 10, 1, 0.9f) > __builtin_expect_with_probability (b, 0, 0.8f))
     global++;
+  if (a * __builtin_expect_with_probability (m, 0, 0.6f) > 0)
+    global++;
+  if (__builtin_expect_with_probability (n, 0, 0.65f) * a > 0)
+    global++;
 }
 
 /* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 54.00%" "profile_estimate"} } */
 /* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 77.70%" "profile_estimate"} } */
 /* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 98.96%" "profile_estimate"} } */
 /* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 71.99%" "profile_estimate"} } */
+/* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 40.00%" "profile_estimate"} } */
+/* { dg-final { scan-tree-dump "__builtin_expect_with_probability heuristics of edge .*->.*: 35.01%" "profile_estimate"} } */