The following fixes three memory leaks I discovered while double-checking a
local patch.

Bootstrap / regtest running on x86_64-unknown-linux-gnu.

Richard.

2018-05-04  Richard Biener  <rguent...@suse.de>

        * bb-reorder.c (sanitize_hot_paths): Release hot_bbs_to_check.
        * gimple-ssa-store-merging.c
        (imm_store_chain_info::output_merged_store): Remove redundant create,
        release split_store vector contents on failure.
        * tree-vect-slp.c (vect_schedule_slp_instance): Avoid leaking
        scalar stmt vector on cache hit.
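
For reference, the common shape of the first two leaks is a container of
heap-allocated objects (or a non-auto vec) that is not freed on an
early-return path.  Below is a minimal standalone C++ sketch of that
pattern; it uses std::vector in place of GCC's auto_vec and an
illustrative split_store stand-in, so names and details are not the
actual GCC code:

/* Minimal sketch, not GCC code: a vector of raw pointers owns its
   elements, so every early-return path must delete them; releasing the
   vector's own storage (auto_vec or std::vector) is not enough.  */

#include <vector>

struct split_store { /* per-store data */ };

static bool
output_merged_store_sketch (bool profitable)
{
  std::vector<split_store *> split_stores;
  split_stores.push_back (new split_store);

  if (!profitable)
    {
      /* Without this loop the split_store objects leak even though the
         vector itself is destroyed when it goes out of scope.  */
      for (split_store *s : split_stores)
        delete s;
      return false;
    }

  /* ... emit the merged sequence, then free as before ...  */
  for (split_store *s : split_stores)
    delete s;
  return true;
}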

diff --git a/gcc/bb-reorder.c b/gcc/bb-reorder.c
index d2b41606a14..bc08e11a81d 100644
--- a/gcc/bb-reorder.c
+++ b/gcc/bb-reorder.c
@@ -1572,6 +1572,7 @@ sanitize_hot_paths (bool walk_up, unsigned int cold_bb_count,
           hot_bbs_to_check.safe_push (reach_bb);
         }
     }
+  hot_bbs_to_check.release ();
 
   return cold_bb_count;
 }
diff --git a/gcc/gimple-ssa-store-merging.c b/gcc/gimple-ssa-store-merging.c
index 6f6538bf37e..2e1a6ef0e55 100644
--- a/gcc/gimple-ssa-store-merging.c
+++ b/gcc/gimple-ssa-store-merging.c
@@ -3343,6 +3343,8 @@ invert_op (split_store *split_store, int idx, tree int_type, tree &mask)
 bool
 imm_store_chain_info::output_merged_store (merged_store_group *group)
 {
+  split_store *split_store;
+  unsigned int i;
   unsigned HOST_WIDE_INT start_byte_pos
     = group->bitregion_start / BITS_PER_UNIT;
 
@@ -3351,7 +3353,6 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
     return false;
 
   auto_vec<struct split_store *, 32> split_stores;
-  split_stores.create (0);
   bool allow_unaligned_store
     = !STRICT_ALIGNMENT && PARAM_VALUE (PARAM_STORE_MERGING_ALLOW_UNALIGNED);
   bool allow_unaligned_load = allow_unaligned_store;
@@ -3378,6 +3379,8 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
        fprintf (dump_file, "Exceeded original number of stmts (%u)."
                            "  Not profitable to emit new sequence.\n",
                 orig_num_stmts);
+      FOR_EACH_VEC_ELT (split_stores, i, split_store)
+       delete split_store;
       return false;
     }
   if (total_orig <= total_new)
@@ -3389,6 +3392,8 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
                            " not larger than estimated number of new"
                            " stmts (%u).\n",
                 total_orig, total_new);
+      FOR_EACH_VEC_ELT (split_stores, i, split_store)
+       delete split_store;
       return false;
     }
 
@@ -3453,8 +3458,6 @@ imm_store_chain_info::output_merged_store (merged_store_group *group)
     }
 
   gimple *stmt = NULL;
-  split_store *split_store;
-  unsigned int i;
   auto_vec<gimple *, 32> orig_stmts;
   gimple_seq this_seq;
   tree addr = force_gimple_operand_1 (unshare_expr (base_addr), &this_seq,
diff --git a/gcc/tree-vect-slp.c b/gcc/tree-vect-slp.c
index 73aa2271b53..d1703cfca43 100644
--- a/gcc/tree-vect-slp.c
+++ b/gcc/tree-vect-slp.c
@@ -4064,15 +4086,15 @@ vect_schedule_slp_instance (slp_tree node, slp_instance instance,
 
   /* See if we have already vectorized the same set of stmts and reuse their
      vectorized stmts.  */
-  slp_tree &leader
-    = bst_map->get_or_insert (SLP_TREE_SCALAR_STMTS (node).copy ());
-  if (leader)
+  if (slp_tree *leader = bst_map->get (SLP_TREE_SCALAR_STMTS (node)))
     {
-      SLP_TREE_VEC_STMTS (node).safe_splice (SLP_TREE_VEC_STMTS (leader));
+      SLP_TREE_VEC_STMTS (node).safe_splice (SLP_TREE_VEC_STMTS (*leader));
+      SLP_TREE_NUMBER_OF_VEC_STMTS (node)
+       = SLP_TREE_NUMBER_OF_VEC_STMTS (*leader);
       return false;
     }
 
-  leader = node;
+  bst_map->put (SLP_TREE_SCALAR_STMTS (node).copy (), node);
   FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
     vect_schedule_slp_instance (child, instance, bst_map);
 
