From ae0d372bc194119013a66c63e4ec371db23779be Mon Sep 17 00:00:00 2001
From: Richard Guo <riguo@pivotal.io>
Date: Tue, 11 Jun 2019 07:48:29 +0000
Subject: [PATCH] Implement parallel grouping sets.

PostgreSQL already supports parallel aggregation, which is implemented
in two stages. First, each worker performs a partial aggregation step,
producing a partial result for each group of which that worker is
aware. Second, the partial results are transferred to the leader via
the Gather node. Finally, the leader merges the partial results and
produces the final result for each group.

This patch implements parallel grouping sets in the same way. The only
difference is that in the final stage, the leader performs a grouping
sets aggregation rather than a plain aggregation. To make this
possible, the partial aggregation step groups by all the columns in
parse->groupClause, so that a single stream of partial results can
feed every grouping set, and GroupingFunc expressions are not
evaluated until the final stage.
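
For example, with the parallel costs zeroed out, the regression tests
added by this patch produce two-stage plans of the following shape
(from src/test/regress/expected/groupingsets_parallel.out):

    explain (costs off, verbose)
    select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1));
                          QUERY PLAN
    ------------------------------------------------------
     Finalize HashAggregate
       Output: c1, c2, avg(c3)
       Hash Key: gstest.c1, gstest.c2
       Hash Key: gstest.c1
       ->  Gather
             Output: c1, c2, (PARTIAL avg(c3))
             Workers Planned: 4
             ->  Partial HashAggregate
                   Output: c1, c2, PARTIAL avg(c3)
                   Group Key: gstest.c1, gstest.c2
                   ->  Parallel Seq Scan on public.gstest
                         Output: c1, c2, c3
    (12 rows)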
---
 src/backend/optimizer/plan/createplan.c            |   4 +-
 src/backend/optimizer/plan/planner.c               | 129 ++++++++++++----
 src/backend/optimizer/util/pathnode.c              |   2 +
 src/include/nodes/pathnodes.h                      |   1 +
 src/include/optimizer/pathnode.h                   |   1 +
 .../regress/expected/groupingsets_parallel.out     | 172 +++++++++++++++++++++
 src/test/regress/parallel_schedule                 |   1 +
 src/test/regress/serial_schedule                   |   1 +
 src/test/regress/sql/groupingsets_parallel.sql     |  43 ++++++
 9 files changed, 319 insertions(+), 35 deletions(-)
 create mode 100644 src/test/regress/expected/groupingsets_parallel.out
 create mode 100644 src/test/regress/sql/groupingsets_parallel.sql

diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c
index c6b8553..a6dd314 100644
--- a/src/backend/optimizer/plan/createplan.c
+++ b/src/backend/optimizer/plan/createplan.c
@@ -2244,7 +2244,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
 			agg_plan = (Plan *) make_agg(NIL,
 										 NIL,
 										 strat,
-										 AGGSPLIT_SIMPLE,
+										 best_path->aggsplit,
 										 list_length((List *) linitial(rollup->gsets)),
 										 new_grpColIdx,
 										 extract_grouping_ops(rollup->groupClause),
@@ -2282,7 +2282,7 @@ create_groupingsets_plan(PlannerInfo *root, GroupingSetsPath *best_path)
 		plan = make_agg(build_path_tlist(root, &best_path->path),
 						best_path->qual,
 						best_path->aggstrategy,
-						AGGSPLIT_SIMPLE,
+						best_path->aggsplit,
 						numGroupCols,
 						top_grpColIdx,
 						extract_grouping_ops(rollup->groupClause),
diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c
index 36fefd9..b1adda5 100644
--- a/src/backend/optimizer/plan/planner.c
+++ b/src/backend/optimizer/plan/planner.c
@@ -148,7 +148,8 @@ static void standard_qp_callback(PlannerInfo *root, void *extra);
 static double get_number_of_groups(PlannerInfo *root,
 								   double path_rows,
 								   grouping_sets_data *gd,
-								   List *target_list);
+								   List *target_list,
+								   bool is_partial);
 static RelOptInfo *create_grouping_paths(PlannerInfo *root,
 										 RelOptInfo *input_rel,
 										 PathTarget *target,
@@ -176,7 +177,8 @@ static void consider_groupingsets_paths(PlannerInfo *root,
 										bool can_hash,
 										grouping_sets_data *gd,
 										const AggClauseCosts *agg_costs,
-										double dNumGroups);
+										double dNumGroups,
+										AggSplit aggsplit);
 static RelOptInfo *create_window_paths(PlannerInfo *root,
 									   RelOptInfo *input_rel,
 									   PathTarget *input_target,
@@ -3670,6 +3672,7 @@ standard_qp_callback(PlannerInfo *root, void *extra)
  * path_rows: number of output rows from scan/join step
  * gd: grouping sets data including list of grouping sets and their clauses
  * target_list: target list containing group clause references
+ * is_partial: whether the estimate is for a partial aggregation step
  *
  * If doing grouping sets, we also annotate the gsets data with the estimates
  * for each set and each individual rollup list, with a view to later
@@ -3679,7 +3682,8 @@ static double
 get_number_of_groups(PlannerInfo *root,
 					 double path_rows,
 					 grouping_sets_data *gd,
-					 List *target_list)
+					 List *target_list,
+					 bool is_partial)
 {
 	Query	   *parse = root->parse;
 	double		dNumGroups;
@@ -3688,7 +3692,7 @@ get_number_of_groups(PlannerInfo *root,
 	{
 		List	   *groupExprs;
 
-		if (parse->groupingSets)
+		if (parse->groupingSets && !is_partial)
 		{
 			/* Add up the estimates for each grouping set */
 			ListCell   *lc;
@@ -3751,7 +3755,7 @@ get_number_of_groups(PlannerInfo *root,
 		}
 		else
 		{
-			/* Plain GROUP BY */
+			/* Plain GROUP BY, or estimating for a partial aggregation step */
 			groupExprs = get_sortgrouplist_exprs(parse->groupClause,
 												 target_list);
 
@@ -4144,7 +4148,8 @@ create_ordinary_grouping_paths(PlannerInfo *root, RelOptInfo *input_rel,
 	dNumGroups = get_number_of_groups(root,
 									  cheapest_path->rows,
 									  gd,
-									  extra->targetList);
+									  extra->targetList,
+									  false);
 
 	/* Build final grouping paths */
 	add_paths_to_grouping_rel(root, input_rel, grouped_rel,
@@ -4189,7 +4194,8 @@ consider_groupingsets_paths(PlannerInfo *root,
 							bool can_hash,
 							grouping_sets_data *gd,
 							const AggClauseCosts *agg_costs,
-							double dNumGroups)
+							double dNumGroups,
+							AggSplit aggsplit)
 {
 	Query	   *parse = root->parse;
 
@@ -4351,6 +4357,7 @@ consider_groupingsets_paths(PlannerInfo *root,
 										  path,
 										  (List *) parse->havingQual,
 										  strat,
+										  aggsplit,
 										  new_rollups,
 										  agg_costs,
 										  dNumGroups));
@@ -4508,6 +4515,7 @@ consider_groupingsets_paths(PlannerInfo *root,
 											  path,
 											  (List *) parse->havingQual,
 											  AGG_MIXED,
+											  aggsplit,
 											  rollups,
 											  agg_costs,
 											  dNumGroups));
@@ -4524,6 +4532,7 @@ consider_groupingsets_paths(PlannerInfo *root,
 										  path,
 										  (List *) parse->havingQual,
 										  AGG_SORTED,
+										  aggsplit,
 										  gd->rollups,
 										  agg_costs,
 										  dNumGroups));
@@ -5198,7 +5207,15 @@ make_partial_grouping_target(PlannerInfo *root,
 	foreach(lc, grouping_target->exprs)
 	{
 		Expr	   *expr = (Expr *) lfirst(lc);
-		Index		sgref = get_pathtarget_sortgroupref(grouping_target, i);
+		Index		sgref = get_pathtarget_sortgroupref(grouping_target, i++);
+
+		/*
+		 * GroupingFunc need not be evaluated in Partial Aggregate, since
+		 * the partial aggregation step never handles more than one grouping
+		 * set at a time.
+		 */
+		if (IsA(expr, GroupingFunc))
+			continue;
 
 		if (sgref && parse->groupClause &&
 			get_sortgroupref_clause_noerr(sgref, parse->groupClause) != NULL)
@@ -5217,8 +5234,6 @@ make_partial_grouping_target(PlannerInfo *root,
 			 */
 			non_group_cols = lappend(non_group_cols, expr);
 		}
-
-		i++;
 	}
 
 	/*
@@ -6412,7 +6427,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 				{
 					consider_groupingsets_paths(root, grouped_rel,
 												path, true, can_hash,
-												gd, agg_costs, dNumGroups);
+												gd, agg_costs, dNumGroups, AGGSPLIT_SIMPLE);
 				}
 				else if (parse->hasAggs)
 				{
@@ -6479,7 +6494,14 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 													 -1.0);
 				}
 
-				if (parse->hasAggs)
+				/*
+				 * Parallel grouping sets: finalize them in the leader.
+				 */
+				if (parse->groupingSets)
+					consider_groupingsets_paths(root, grouped_rel,
+												path, true, can_hash,
+												gd, agg_final_costs, dNumGroups, AGGSPLIT_FINAL_DESERIAL);
+				else if (parse->hasAggs)
 					add_path(grouped_rel, (Path *)
 							 create_agg_path(root,
 											 grouped_rel,
@@ -6514,7 +6536,7 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 			 */
 			consider_groupingsets_paths(root, grouped_rel,
 										cheapest_path, false, true,
-										gd, agg_costs, dNumGroups);
+										gd, agg_costs, dNumGroups, AGGSPLIT_SIMPLE);
 		}
 		else
 		{
@@ -6562,17 +6584,27 @@ add_paths_to_grouping_rel(PlannerInfo *root, RelOptInfo *input_rel,
 														  dNumGroups);
 
 			if (hashaggtablesize < work_mem * 1024L)
-				add_path(grouped_rel, (Path *)
-						 create_agg_path(root,
-										 grouped_rel,
-										 path,
-										 grouped_rel->reltarget,
-										 AGG_HASHED,
-										 AGGSPLIT_FINAL_DESERIAL,
-										 parse->groupClause,
-										 havingQual,
-										 agg_final_costs,
-										 dNumGroups));
+			{
+				/*
+				 * Parallel grouping sets: finalize them in the leader.
+				 */
+				if (parse->groupingSets)
+					consider_groupingsets_paths(root, grouped_rel,
+												path, false, true,
+												gd, agg_final_costs, dNumGroups, AGGSPLIT_FINAL_DESERIAL);
+				else
+					add_path(grouped_rel, (Path *)
+							 create_agg_path(root,
+											 grouped_rel,
+											 path,
+											 grouped_rel->reltarget,
+											 AGG_HASHED,
+											 AGGSPLIT_FINAL_DESERIAL,
+											 parse->groupClause,
+											 havingQual,
+											 agg_final_costs,
+											 dNumGroups));
+			}
 		}
 	}
 
@@ -6712,13 +6744,15 @@ create_partial_grouping_paths(PlannerInfo *root,
 			get_number_of_groups(root,
 								 cheapest_total_path->rows,
 								 gd,
-								 extra->targetList);
+								 extra->targetList,
+								 true);
 	if (cheapest_partial_path != NULL)
 		dNumPartialPartialGroups =
 			get_number_of_groups(root,
 								 cheapest_partial_path->rows,
 								 gd,
-								 extra->targetList);
+								 extra->targetList,
+								 true);
 
 	if (can_sort && cheapest_total_path != NULL)
 	{
@@ -6740,11 +6774,28 @@ create_partial_grouping_paths(PlannerInfo *root,
 			{
 				/* Sort the cheapest partial path, if it isn't already */
 				if (!is_sorted)
+				{
+					List *pathkeys;
+
+					/*
+					 * If we are performing Partial Aggregate for grouping
+					 * sets, we need to sort by all the columns in
+					 * parse->groupClause.
+					 */
+					if (parse->groupingSets)
+						pathkeys =
+							make_pathkeys_for_sortclauses(root,
+														  parse->groupClause,
+														  root->processed_tlist);
+					else
+						pathkeys = root->group_pathkeys;
+
 					path = (Path *) create_sort_path(root,
 													 partially_grouped_rel,
 													 path,
-													 root->group_pathkeys,
+													 pathkeys,
 													 -1.0);
+				}
 
 				if (parse->hasAggs)
 					add_path(partially_grouped_rel, (Path *)
@@ -6784,11 +6835,28 @@ create_partial_grouping_paths(PlannerInfo *root,
 			{
 				/* Sort the cheapest partial path, if it isn't already */
 				if (!is_sorted)
+				{
+					List *pathkeys;
+
+					/*
+					 * If we are performing Partial Aggregate for grouping
+					 * sets, we need to sort by all the columns in
+					 * parse->groupClause.
+					 */
+					if (parse->groupingSets)
+						pathkeys =
+							make_pathkeys_for_sortclauses(root,
+														  parse->groupClause,
+														  root->processed_tlist);
+					else
+						pathkeys = root->group_pathkeys;
+
 					path = (Path *) create_sort_path(root,
 													 partially_grouped_rel,
 													 path,
-													 root->group_pathkeys,
+													 pathkeys,
 													 -1.0);
+				}
 
 				if (parse->hasAggs)
 					add_partial_path(partially_grouped_rel, (Path *)
@@ -6958,11 +7026,6 @@ can_partial_agg(PlannerInfo *root, const AggClauseCosts *agg_costs)
 		 */
 		return false;
 	}
-	else if (parse->groupingSets)
-	{
-		/* We don't know how to do grouping sets in parallel. */
-		return false;
-	}
 	else if (agg_costs->hasNonPartial || agg_costs->hasNonSerial)
 	{
 		/* Insufficient support for partial mode. */
diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c
index 0ac7398..6c1b5d9 100644
--- a/src/backend/optimizer/util/pathnode.c
+++ b/src/backend/optimizer/util/pathnode.c
@@ -2990,6 +2990,7 @@ create_groupingsets_path(PlannerInfo *root,
 						 Path *subpath,
 						 List *having_qual,
 						 AggStrategy aggstrategy,
+						 AggSplit aggsplit,
 						 List *rollups,
 						 const AggClauseCosts *agg_costs,
 						 double numGroups)
@@ -3035,6 +3036,7 @@ create_groupingsets_path(PlannerInfo *root,
 		pathnode->path.pathkeys = NIL;
 
 	pathnode->aggstrategy = aggstrategy;
+	pathnode->aggsplit = aggsplit;
 	pathnode->rollups = rollups;
 	pathnode->qual = having_qual;
 
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index e3c579e..6b89a12 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -1698,6 +1698,7 @@ typedef struct GroupingSetsPath
 	Path		path;
 	Path	   *subpath;		/* path representing input source */
 	AggStrategy aggstrategy;	/* basic strategy */
+	AggSplit	aggsplit;		/* agg-splitting mode, see nodes.h */
 	List	   *rollups;		/* list of RollupData */
 	List	   *qual;			/* quals (HAVING quals), if any */
 } GroupingSetsPath;
diff --git a/src/include/optimizer/pathnode.h b/src/include/optimizer/pathnode.h
index 182ffee..6288da8 100644
--- a/src/include/optimizer/pathnode.h
+++ b/src/include/optimizer/pathnode.h
@@ -215,6 +215,7 @@ extern GroupingSetsPath *create_groupingsets_path(PlannerInfo *root,
 												  Path *subpath,
 												  List *having_qual,
 												  AggStrategy aggstrategy,
+												  AggSplit aggsplit,
 												  List *rollups,
 												  const AggClauseCosts *agg_costs,
 												  double numGroups);
diff --git a/src/test/regress/expected/groupingsets_parallel.out b/src/test/regress/expected/groupingsets_parallel.out
new file mode 100644
index 0000000..b0b143f
--- /dev/null
+++ b/src/test/regress/expected/groupingsets_parallel.out
@@ -0,0 +1,172 @@
+--
+-- parallel grouping sets
+--
+-- test data sources
+create table gstest(c1 int, c2 int, c3 int) with (parallel_workers = 4);
+insert into gstest select 1,10,100 from generate_series(1,10)i;
+insert into gstest select 1,10,200 from generate_series(1,10)i;
+insert into gstest select 1,20,30 from generate_series(1,10)i;
+insert into gstest select 2,30,40 from generate_series(1,10)i;
+insert into gstest select 2,40,50 from generate_series(1,10)i;
+insert into gstest select 3,50,60 from generate_series(1,10)i;
+insert into gstest select 1,NULL,0 from generate_series(1,10)i;
+analyze gstest;
+SET parallel_tuple_cost=0;
+SET parallel_setup_cost=0;
+SET max_parallel_workers_per_gather=4;
+-- test for hashagg
+set enable_hashagg to on;
+explain (costs off, verbose)
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1));
+                      QUERY PLAN                      
+------------------------------------------------------
+ Finalize HashAggregate
+   Output: c1, c2, avg(c3)
+   Hash Key: gstest.c1, gstest.c2
+   Hash Key: gstest.c1
+   ->  Gather
+         Output: c1, c2, (PARTIAL avg(c3))
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Output: c1, c2, PARTIAL avg(c3)
+               Group Key: gstest.c1, gstest.c2
+               ->  Parallel Seq Scan on public.gstest
+                     Output: c1, c2, c3
+(12 rows)
+
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1)) order by 1,2,3;
+ c1 | c2 |          avg           
+----+----+------------------------
+  1 | 10 |   150.0000000000000000
+  1 | 20 |    30.0000000000000000
+  1 |    | 0.00000000000000000000
+  1 |    |    82.5000000000000000
+  2 | 30 |    40.0000000000000000
+  2 | 40 |    50.0000000000000000
+  2 |    |    45.0000000000000000
+  3 | 50 |    60.0000000000000000
+  3 |    |    60.0000000000000000
+(9 rows)
+
+explain (costs off, verbose)
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3));
+                        QUERY PLAN                        
+----------------------------------------------------------
+ Finalize HashAggregate
+   Output: c1, c2, c3, avg(c3)
+   Hash Key: gstest.c1, gstest.c2
+   Hash Key: gstest.c1
+   Hash Key: gstest.c2, gstest.c3
+   ->  Gather
+         Output: c1, c2, c3, (PARTIAL avg(c3))
+         Workers Planned: 4
+         ->  Partial HashAggregate
+               Output: c1, c2, c3, PARTIAL avg(c3)
+               Group Key: gstest.c1, gstest.c2, gstest.c3
+               ->  Parallel Seq Scan on public.gstest
+                     Output: c1, c2, c3
+(13 rows)
+
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3)) order by 1,2,3,4;
+ c1 | c2 | c3  |          avg           
+----+----+-----+------------------------
+  1 | 10 |     |   150.0000000000000000
+  1 | 20 |     |    30.0000000000000000
+  1 |    |     | 0.00000000000000000000
+  1 |    |     |    82.5000000000000000
+  2 | 30 |     |    40.0000000000000000
+  2 | 40 |     |    50.0000000000000000
+  2 |    |     |    45.0000000000000000
+  3 | 50 |     |    60.0000000000000000
+  3 |    |     |    60.0000000000000000
+    | 10 | 100 |   100.0000000000000000
+    | 10 | 200 |   200.0000000000000000
+    | 20 |  30 |    30.0000000000000000
+    | 30 |  40 |    40.0000000000000000
+    | 40 |  50 |    50.0000000000000000
+    | 50 |  60 |    60.0000000000000000
+    |    |   0 | 0.00000000000000000000
+(16 rows)
+
+-- test for groupagg
+set enable_hashagg to off;
+explain (costs off, verbose)
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1));
+                         QUERY PLAN                         
+------------------------------------------------------------
+ Finalize GroupAggregate
+   Output: c1, c2, avg(c3)
+   Group Key: gstest.c1, gstest.c2
+   Group Key: gstest.c1
+   ->  Gather Merge
+         Output: c1, c2, (PARTIAL avg(c3))
+         Workers Planned: 4
+         ->  Partial GroupAggregate
+               Output: c1, c2, PARTIAL avg(c3)
+               Group Key: gstest.c1, gstest.c2
+               ->  Sort
+                     Output: c1, c2, c3
+                     Sort Key: gstest.c1, gstest.c2
+                     ->  Parallel Seq Scan on public.gstest
+                           Output: c1, c2, c3
+(15 rows)
+
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1)) order by 1,2,3;
+ c1 | c2 |          avg           
+----+----+------------------------
+  1 | 10 |   150.0000000000000000
+  1 | 20 |    30.0000000000000000
+  1 |    | 0.00000000000000000000
+  1 |    |    82.5000000000000000
+  2 | 30 |    40.0000000000000000
+  2 | 40 |    50.0000000000000000
+  2 |    |    45.0000000000000000
+  3 | 50 |    60.0000000000000000
+  3 |    |    60.0000000000000000
+(9 rows)
+
+explain (costs off, verbose)
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3));
+                          QUERY PLAN                           
+---------------------------------------------------------------
+ Finalize GroupAggregate
+   Output: c1, c2, c3, avg(c3)
+   Group Key: gstest.c1, gstest.c2
+   Group Key: gstest.c1
+   Sort Key: gstest.c2, gstest.c3
+     Group Key: gstest.c2, gstest.c3
+   ->  Gather Merge
+         Output: c1, c2, c3, (PARTIAL avg(c3))
+         Workers Planned: 4
+         ->  Partial GroupAggregate
+               Output: c1, c2, c3, PARTIAL avg(c3)
+               Group Key: gstest.c1, gstest.c2, gstest.c3
+               ->  Sort
+                     Output: c1, c2, c3
+                     Sort Key: gstest.c1, gstest.c2, gstest.c3
+                     ->  Parallel Seq Scan on public.gstest
+                           Output: c1, c2, c3
+(17 rows)
+
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3)) order by 1,2,3,4;
+ c1 | c2 | c3  |          avg           
+----+----+-----+------------------------
+  1 | 10 |     |   150.0000000000000000
+  1 | 20 |     |    30.0000000000000000
+  1 |    |     | 0.00000000000000000000
+  1 |    |     |    82.5000000000000000
+  2 | 30 |     |    40.0000000000000000
+  2 | 40 |     |    50.0000000000000000
+  2 |    |     |    45.0000000000000000
+  3 | 50 |     |    60.0000000000000000
+  3 |    |     |    60.0000000000000000
+    | 10 | 100 |   100.0000000000000000
+    | 10 | 200 |   200.0000000000000000
+    | 20 |  30 |    30.0000000000000000
+    | 30 |  40 |    40.0000000000000000
+    | 40 |  50 |    50.0000000000000000
+    | 50 |  60 |    60.0000000000000000
+    |    |   0 | 0.00000000000000000000
+(16 rows)
+
+drop table gstest;
diff --git a/src/test/regress/parallel_schedule b/src/test/regress/parallel_schedule
index 8fb55f0..8a88b83 100644
--- a/src/test/regress/parallel_schedule
+++ b/src/test/regress/parallel_schedule
@@ -87,6 +87,7 @@ test: rules psql psql_crosstab amutils stats_ext
 # run by itself so it can run parallel workers
 test: select_parallel
 test: write_parallel
+test: groupingsets_parallel
 
 # no relation related tests can be put in this group
 test: publication subscription
diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule
index a39ca10..4495155 100644
--- a/src/test/regress/serial_schedule
+++ b/src/test/regress/serial_schedule
@@ -140,6 +140,7 @@ test: amutils
 test: stats_ext
 test: select_parallel
 test: write_parallel
+test: groupingsets_parallel
 test: publication
 test: subscription
 test: select_views
diff --git a/src/test/regress/sql/groupingsets_parallel.sql b/src/test/regress/sql/groupingsets_parallel.sql
new file mode 100644
index 0000000..fee2c9a
--- /dev/null
+++ b/src/test/regress/sql/groupingsets_parallel.sql
@@ -0,0 +1,43 @@
+--
+-- parallel grouping sets
+--
+
+-- test data sources
+create table gstest(c1 int, c2 int, c3 int) with (parallel_workers = 4);
+
+insert into gstest select 1,10,100 from generate_series(1,10)i;
+insert into gstest select 1,10,200 from generate_series(1,10)i;
+insert into gstest select 1,20,30 from generate_series(1,10)i;
+insert into gstest select 2,30,40 from generate_series(1,10)i;
+insert into gstest select 2,40,50 from generate_series(1,10)i;
+insert into gstest select 3,50,60 from generate_series(1,10)i;
+insert into gstest select 1,NULL,0 from generate_series(1,10)i;
+analyze gstest;
+
+SET parallel_tuple_cost=0;
+SET parallel_setup_cost=0;
+SET max_parallel_workers_per_gather=4;
+
+-- test for hashagg
+set enable_hashagg to on;
+explain (costs off, verbose)
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1));
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1)) order by 1,2,3;
+
+explain (costs off, verbose)
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3));
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3)) order by 1,2,3,4;
+
+
+-- test for groupagg
+set enable_hashagg to off;
+explain (costs off, verbose)
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1));
+select c1, c2, avg(c3) from gstest group by grouping sets((c1,c2),(c1)) order by 1,2,3;
+
+explain (costs off, verbose)
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3));
+select c1, c2, c3, avg(c3) from gstest group by grouping sets((c1,c2),(c1),(c2,c3)) order by 1,2,3,4;
+
+
+drop table gstest;
-- 
2.7.4

