Hi, On Tue, 24 Jan 2012, Richard Guenther wrote:
> > + && !is_gimple_reg (t)) > > + > > Ok with the excessive vertical space removed. Actually the patch as is was regressing some testcases (pr48794.f90, fixed with a tree-eh change in another thread) and pack9.adb, which is fixed with the tree-eh change below. As we emit more clobbers with my gimplify patch we more often run into the situation that certain EH blocks aren't really empty. The easy fix is to try making them empty before checking in cleanup_empty_eh. Compared to the last version I also removed the unrelated changes in the predicate; it was a thinko. This patch as is completed regstrap on x86_64-linux for all,ada,obj-c++. Still okay? Ciao, Michael. PR tree-optimization/46590 * cfgexpand.c: Revert last change (r183305). * gimplify.c (gimplify_bind_expr): Add clobbers for all non-gimple regs. * tree-eh.c (cleanup_empty_eh): Try to optimize clobbers before checking for emptiness. Index: gimplify.c =================================================================== --- gimplify.c (revision 183524) +++ gimplify.c (working copy) @@ -1231,7 +1231,7 @@ gimplify_bind_expr (tree *expr_p, gimple && !DECL_HAS_VALUE_EXPR_P (t) /* Only care for variables that have to be in memory. Others will be rewritten into SSA names, hence moved to the top-level. */ - && needs_to_live_in_memory (t)) + && !is_gimple_reg (t)) { tree clobber = build_constructor (TREE_TYPE (t), NULL); TREE_THIS_VOLATILE (clobber) = 1; Index: tree-eh.c =================================================================== --- tree-eh.c (revision 183559) +++ tree-eh.c (working copy) @@ -4056,6 +4056,7 @@ cleanup_empty_eh (eh_landing_pad lp) edge_iterator ei; edge e, e_out; bool has_non_eh_pred; + bool ret = false; int new_lp_nr; /* There can be zero or one edges out of BB. This is the quickest test. 
*/ @@ -4070,6 +4071,16 @@ cleanup_empty_eh (eh_landing_pad lp) default: return false; } + + resx = last_stmt (bb); + if (resx && is_gimple_resx (resx)) + { + if (stmt_can_throw_external (resx)) + optimize_clobbers (bb); + else if (sink_clobbers (bb)) + ret = true; + } + gsi = gsi_after_labels (bb); /* Make sure to skip debug statements. */ @@ -4081,9 +4092,9 @@ cleanup_empty_eh (eh_landing_pad lp) { /* For the degenerate case of an infinite loop bail out. */ if (infinite_empty_loop_p (e_out)) - return false; + return ret; - return cleanup_empty_eh_unsplit (bb, e_out, lp); + return ret | cleanup_empty_eh_unsplit (bb, e_out, lp); } /* The block should consist only of a single RESX statement, modulo a @@ -4096,7 +4107,7 @@ cleanup_empty_eh (eh_landing_pad lp) resx = gsi_stmt (gsi); } if (!is_gimple_resx (resx)) - return false; + return ret; gcc_assert (gsi_one_before_end_p (gsi)); /* Determine if there are non-EH edges, or resx edges into the handler. */ @@ -4172,7 +4183,7 @@ cleanup_empty_eh (eh_landing_pad lp) return true; } - return false; + return ret; succeed: if (dump_file && (dump_flags & TDF_DETAILS)) Index: cfgexpand.c =================================================================== --- cfgexpand.c (revision 183524) +++ cfgexpand.c (working copy) @@ -440,12 +440,11 @@ visit_conflict (gimple stmt ATTRIBUTE_UN /* Helper routine for add_scope_conflicts, calculating the active partitions at the end of BB, leaving the result in WORK. We're called to generate - conflicts when OLD_CONFLICTS is non-null, otherwise we're just tracking - liveness. If we generate conflicts then OLD_CONFLICTS stores the bits - for which we generated conflicts already. */ + conflicts when FOR_CONFLICT is true, otherwise we're just tracking + liveness. 
*/ static void -add_scope_conflicts_1 (basic_block bb, bitmap work, bitmap old_conflicts) +add_scope_conflicts_1 (basic_block bb, bitmap work, bool for_conflict) { edge e; edge_iterator ei; @@ -482,7 +481,7 @@ add_scope_conflicts_1 (basic_block bb, b } else if (!is_gimple_debug (stmt)) { - if (old_conflicts + if (for_conflict && visit == visit_op) { /* If this is the first real instruction in this BB we need @@ -490,27 +489,16 @@ add_scope_conflicts_1 (basic_block bb, b Unlike classical liveness for named objects we can't rely on seeing a def/use of the names we're interested in. There might merely be indirect loads/stores. We'd not add any - conflicts for such partitions. We know that we generated - conflicts between all partitions in old_conflicts already, - so we need to generate only the new ones, avoiding to - repeatedly pay the O(N^2) cost for each basic block. */ + conflicts for such partitions. */ bitmap_iterator bi; unsigned i; - - EXECUTE_IF_AND_COMPL_IN_BITMAP (work, old_conflicts, 0, i, bi) + EXECUTE_IF_SET_IN_BITMAP (work, 0, i, bi) { unsigned j; bitmap_iterator bj; - /* First the conflicts between new and old_conflicts. */ - EXECUTE_IF_SET_IN_BITMAP (old_conflicts, 0, j, bj) - add_stack_var_conflict (i, j); - /* Then the conflicts between only the new members. */ - EXECUTE_IF_AND_COMPL_IN_BITMAP (work, old_conflicts, i + 1, - j, bj) + EXECUTE_IF_SET_IN_BITMAP (work, i + 1, j, bj) add_stack_var_conflict (i, j); } - /* And remember for the next basic block. 
*/ - bitmap_ior_into (old_conflicts, work); visit = visit_conflict; } walk_stmt_load_store_addr_ops (stmt, work, visit, visit, visit); @@ -527,7 +515,6 @@ add_scope_conflicts (void) basic_block bb; bool changed; bitmap work = BITMAP_ALLOC (NULL); - bitmap old_conflicts; /* We approximate the live range of a stack variable by taking the first mention of its name as starting point(s), and by the end-of-scope @@ -549,18 +536,15 @@ add_scope_conflicts (void) FOR_EACH_BB (bb) { bitmap active = (bitmap)bb->aux; - add_scope_conflicts_1 (bb, work, NULL); + add_scope_conflicts_1 (bb, work, false); if (bitmap_ior_into (active, work)) changed = true; } } - old_conflicts = BITMAP_ALLOC (NULL); - FOR_EACH_BB (bb) - add_scope_conflicts_1 (bb, work, old_conflicts); + add_scope_conflicts_1 (bb, work, true); - BITMAP_FREE (old_conflicts); BITMAP_FREE (work); FOR_ALL_BB (bb) BITMAP_FREE (bb->aux);