The original patch had a flaw. The new patch is attached. Bootstrapped and passed regression tests.
Thanks,
Dehao

On Mon, Jun 24, 2013 at 9:44 AM, Dehao Chen <de...@google.com> wrote:
> Hi, Richard,
>
> I've updated the patch (as attached) to use sreal to compute badness.
>
> Thanks,
> Dehao
>
> On Mon, Jun 24, 2013 at 5:41 AM, Richard Biener
> <richard.guent...@gmail.com> wrote:
>> On Sat, Jun 22, 2013 at 2:59 AM, Dehao Chen <de...@google.com> wrote:
>>> This patch prevents integer-underflow of badness computation in ipa-inline.
>>>
>>> Bootstrapped and passed regression tests.
>>>
>>> OK for trunk?
>>>
>>> Thanks,
>>> Dehao
>>>
>>> gcc/ChangeLog:
>>> 2013-06-21  Dehao Chen  <de...@google.com>
>>>
>>>         * ipa-inline.c (edge_badness): Fix integer underflow.
>>>
>>> Index: gcc/ipa-inline.c
>>> ===================================================================
>>> --- gcc/ipa-inline.c (revision 200326)
>>> +++ gcc/ipa-inline.c (working copy)
>>> @@ -888,11 +888,9 @@ edge_badness (struct cgraph_edge *edge, bool dump)
>>>    else if (max_count)
>>>      {
>>>        int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
>>> -      badness =
>>> -       ((int)
>>> -        ((double) edge->count * INT_MIN / 2 / max_count / RELATIVE_TIME_BENEFIT_RANGE) *
>>> -        relbenefit) / growth;
>>> -
>>> +      badness = ((int)((double) edge->count / max_count
>>> +                * relbenefit / RELATIVE_TIME_BENEFIT_RANGE * INT_MIN / 2)) / growth;
>>> +
>>
>> FP operations on the host are frowned upon if code generation depends
>> on their outcome.  They all should use sreal_* as predict already does.
>> Other than that I wonder why the final division is int (so we get truncation
>> and not rounding)?  That increases the effect of doing the multiply by
>> relbenefit in double.
>>
>> Richard.
>>
>>>        /* Be sure that insanity of the profile won't lead to increasing counts
>>>           in the scalling and thus to overflow in the computation above.  */
>>>        gcc_assert (max_count >= edge->count);
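To make the flaw concrete, here is a minimal standalone sketch (hypothetical counts and benefit values; RELATIVE_TIME_BENEFIT_RANGE is approximated by its ipa-inline.c definition, INT_MAX / 64, as I read it; not GCC code). The old expression casts to int before multiplying by relbenefit, so a scaled fraction in (-1, 0) truncates to zero and many distinct edges collapse to badness == 0; the reordered expression from the first patch keeps the whole product in double and casts once at the end:

#include <limits.h>
#include <stdio.h>

/* Illustrative stand-in for the ipa-inline.c macro.  */
#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

int
main (void)
{
  long long count = 100, max_count = 6400;  /* hypothetical edge profile */
  int relbenefit = 1000, growth = 10;       /* hypothetical benefit/growth */

  /* Old expression: the inner cast truncates -0.5 to 0 before relbenefit
     is applied, so the edge ordering information is lost.  */
  int old_badness
    = ((int) ((double) count * INT_MIN / 2 / max_count
              / RELATIVE_TIME_BENEFIT_RANGE) * relbenefit) / growth;

  /* First patch: a single cast at the end keeps relbenefit inside the
     double computation, preserving the ordering between edges.  */
  int new_badness
    = ((int) ((double) count / max_count * relbenefit
              / RELATIVE_TIME_BENEFIT_RANGE * INT_MIN / 2)) / growth;

  printf ("old %d, new %d\n", old_badness, new_badness);  /* old 0, new -50 */
  return 0;
}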
Index: gcc/ChangeLog
===================================================================
--- gcc/ChangeLog	(revision 200375)
+++ gcc/ChangeLog	(working copy)
@@ -1,3 +1,7 @@
+2013-06-21  Dehao Chen  <de...@google.com>
+
+	* ipa-inline.c (edge_badness): Fix integer underflow.
+
 2013-06-24  Martin Jambor  <mjam...@suse.cz>
 
 	PR tree-optimization/57358
Index: gcc/Makefile.in
===================================================================
--- gcc/Makefile.in	(revision 200375)
+++ gcc/Makefile.in	(working copy)
@@ -2947,7 +2947,7 @@ ipa-inline.o : ipa-inline.c $(CONFIG_H) $(SYSTEM_H
 	$(DIAGNOSTIC_H) $(FIBHEAP_H) $(PARAMS_H) $(TREE_PASS_H) \
 	$(COVERAGE_H) $(GGC_H) $(TREE_FLOW_H) $(RTL_H) $(IPA_PROP_H) \
 	$(EXCEPT_H) $(GIMPLE_PRETTY_PRINT_H) $(IPA_INLINE_H) $(TARGET_H) \
-	$(IPA_UTILS_H)
+	$(IPA_UTILS_H) sreal.h
 ipa-inline-analysis.o : ipa-inline-analysis.c $(CONFIG_H) $(SYSTEM_H) coretypes.h $(TM_H) \
 	$(TREE_H) langhooks.h $(TREE_INLINE_H) $(FLAGS_H) $(CGRAPH_H) intl.h \
 	$(DIAGNOSTIC_H) $(PARAMS_H) $(TREE_PASS_H) $(CFGLOOP_H) \
Index: gcc/ipa-inline.c
===================================================================
--- gcc/ipa-inline.c	(revision 200375)
+++ gcc/ipa-inline.c	(working copy)
@@ -113,10 +113,12 @@ along with GCC; see the file COPYING3.  If not see
 #include "target.h"
 #include "ipa-inline.h"
 #include "ipa-utils.h"
+#include "sreal.h"
 
 /* Statistics we collect about inlining algorithm.  */
 static int overall_size;
 static gcov_type max_count;
+static sreal max_count_real, max_relbenefit_real, half_int_min_real;
 
 /* Return false when inlining edge E would lead to violating
    limits on function unit growth or stack usage growth.
@@ -887,12 +889,26 @@ edge_badness (struct cgraph_edge *edge, bool dump)
 
   else if (max_count)
     {
+      sreal tmp, relbenefit_real, growth_real;
       int relbenefit = relative_time_benefit (callee_info, edge, edge_time);
-      badness =
-	((int)
-	 ((double) edge->count * INT_MIN / 2 / max_count / RELATIVE_TIME_BENEFIT_RANGE) *
-	 relbenefit) / growth;
-
+
+      sreal_init (&relbenefit_real, relbenefit, 0);
+      sreal_init (&growth_real, growth, 0);
+
+      /* relative_edge_count.  */
+      sreal_init (&tmp, edge->count, 0);
+      sreal_div (&tmp, &tmp, &max_count_real);
+
+      /* relative_time_benefit.  */
+      sreal_mul (&tmp, &tmp, &relbenefit_real);
+      sreal_div (&tmp, &tmp, &max_relbenefit_real);
+
+      /* growth_f_caller.  */
+      sreal_mul (&tmp, &tmp, &half_int_min_real);
+      sreal_div (&tmp, &tmp, &growth_real);
+
+      badness = -1 * sreal_to_int (&tmp);
+
       /* Be sure that insanity of the profile won't lead to increasing counts
 	 in the scalling and thus to overflow in the computation above.  */
       gcc_assert (max_count >= edge->count);
@@ -1448,6 +1464,9 @@ inline_small_functions (void)
 	if (max_count < edge->count)
 	  max_count = edge->count;
       }
+  sreal_init (&max_count_real, max_count, 0);
+  sreal_init (&max_relbenefit_real, RELATIVE_TIME_BENEFIT_RANGE, 0);
+  sreal_init (&half_int_min_real, INT_MAX / 2, 0);
   ipa_free_postorder_info ();
   initialize_growth_caches ();
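Two notes on the sreal version. It stages the computation as badness = -(edge->count / max_count) * (relbenefit / RELATIVE_TIME_BENEFIT_RANGE) * (INT_MAX / 2) / growth, and half_int_min_real is seeded with INT_MAX / 2 with the result negated only at the end, presumably because sreal values carry no sign at this revision. Below is a double-precision mirror of the pipeline, just to make the staging concrete (illustrative numbers only; in GCC proper this must stay in sreal so host floating point never affects code generation, per Richard's point above):

#include <limits.h>
#include <stdio.h>

/* Stand-in; ipa-inline.c defines this as INT_MAX / 64, as I read it.  */
#define RELATIVE_TIME_BENEFIT_RANGE (INT_MAX / 64)

/* Mirrors the staged sreal computation in the patch, in plain double.  */
static int
badness_sketch (long long count, long long max_count,
                int relbenefit, int growth)
{
  double tmp;

  /* relative_edge_count: how hot this call edge is, in [0, 1].  */
  tmp = (double) count / max_count;

  /* relative_time_benefit: scale by the normalized time benefit.  */
  tmp = tmp * relbenefit / RELATIVE_TIME_BENEFIT_RANGE;

  /* growth_f_caller: spread over [0, INT_MAX / 2], divide by growth.  */
  tmp = tmp * (INT_MAX / 2) / growth;

  /* Negate last, mirroring badness = -1 * sreal_to_int (&tmp).  */
  return -1 * (int) tmp;
}

int
main (void)
{
  /* Hypothetical numbers: a hot, high-benefit edge sorts far ahead of a
     cold one (more negative badness is processed first).  */
  printf ("hot:  %d\n", badness_sketch (9000, 10000, 30000000, 2));
  printf ("cold: %d\n", badness_sketch (3, 10000, 30000000, 2));
  return 0;
}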