This is an automated email from the ASF dual-hosted git repository.
csringhofer pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git
The following commit(s) were added to refs/heads/master by this push:
new 97d766577 IMPALA-14680: Improve row regex search syntax in runtime
profile tests
97d766577 is described below
commit 97d766577df69b5e602c811063f365e464390e21
Author: Surya Hebbar <[email protected]>
AuthorDate: Tue Jan 13 18:02:33 2026 +0530
IMPALA-14680: Improve row regex search syntax in runtime profile tests
Currently, the runtime profile tests contain row regex searches
which try to find matches by comparing the regex line by line.
This form of search is inefficient.
So, while updating the tests for the aggregated profile (IMPALA-9846),
performance is being improved by accumulating row regexes together and
then searching the entire profile at once.
In order to support this improvement, we need to correct the current
`row_regex` syntax being used.
The current tests use greedy regex like ".*" at the beginning and end
of `row_regex` searches. Using greedy regex in this way consumes more
resources and is redundant for the current implementation.
To fix this, these additional greedy regex characters (i.e. `.*`, `.+`)
are being removed or replaced across all the runtime profile tests.
Change-Id: I1460c2d22b03c06aa43c85f78fa9e05cec2775ec
Reviewed-on: http://gerrit.cloudera.org:8080/23864
Tested-by: Impala Public Jenkins <[email protected]>
Reviewed-by: Csaba Ringhofer <[email protected]>
---
.../queries/QueryTest/acid-profile.test | 2 +-
.../QueryTest/admission-max-min-mem-limits.test | 34 +++----
.../QueryTest/admission-reject-mem-estimate.test | 24 ++---
.../queries/QueryTest/all_runtime_filters.test | 92 +++++++++---------
.../queries/QueryTest/alter-table.test | 22 ++---
.../queries/QueryTest/bloom_filters.test | 38 ++++----
.../queries/QueryTest/calcite.test | 106 ++++++++++-----------
.../queries/QueryTest/codegen-mem-limit.test | 2 +-
.../queries/QueryTest/data-cache.test | 38 ++++----
.../QueryTest/datastream-sender-codegen.test | 6 +-
.../QueryTest/dedicated-coord-mem-estimates.test | 48 +++++-----
.../queries/QueryTest/disable-codegen.test | 6 +-
.../queries/QueryTest/hbase-hms-column-order.test | 6 +-
.../queries/QueryTest/hdfs-partition-pruning.test | 2 +-
.../QueryTest/hdfs_parquet_scan_node_profile.test | 6 +-
.../queries/QueryTest/hdfs_scanner_profile.test | 8 +-
.../iceberg-mixed-format-position-deletes.test | 2 +-
.../queries/QueryTest/iceberg-negative.test | 2 +-
.../queries/QueryTest/iceberg-partitions.test | 12 +--
.../QueryTest/iceberg-scan-metrics-basic.test | 6 +-
.../iceberg-scan-metrics-with-deletes.test | 39 ++------
.../QueryTest/iceberg-v2-directed-mode.test | 4 +-
.../queries/QueryTest/in_list_filters.test | 98 +++++++++----------
.../queries/QueryTest/joins_mt_dop.test | 2 +-
.../queries/QueryTest/kudu_insert.test | 2 +-
.../queries/QueryTest/kudu_insert_mem_limit.test | 2 +-
...u_runtime_filter_with_timestamp_conversion.test | 6 +-
.../queries/QueryTest/max-mt-dop.test | 16 ++--
.../queries/QueryTest/min_max_filters.test | 6 +-
.../QueryTest/mt-dop-parquet-scheduling.test | 90 ++++++++---------
.../QueryTest/no-block-locations-hdfs-only.test | 2 +-
.../queries/QueryTest/no-block-locations.test | 2 +-
.../queries/QueryTest/overlap_min_max_filters.test | 6 +-
.../overlap_min_max_filters_on_sorted_columns.test | 10 +-
.../QueryTest/parquet-late-materialization.test | 4 +-
.../QueryTest/processing-cost-admission-slots.test | 36 +++----
.../queries/QueryTest/query-impala-13138.test | 2 +-
.../queries/QueryTest/query-resource-limits.test | 2 +-
.../queries/QueryTest/runtime_filters.test | 32 +++----
.../QueryTest/runtime_row_filter_reservations.test | 4 +-
.../queries/QueryTest/scanner-reservation.test | 26 ++---
.../queries/QueryTest/scratch-limit.test | 10 +-
.../single-node-joins-with-limits-exhaustive.test | 8 +-
.../queries/QueryTest/single-node-large-sorts.test | 6 +-
.../QueryTest/spilling-no-debug-action.test | 10 +-
...gression-exhaustive-no-default-buffer-size.test | 2 +-
.../QueryTest/spilling-regression-exhaustive.test | 26 ++---
.../QueryTest/union-const-scalar-expr-codegen.test | 3 -
.../targeted-perf/queries/aggregation.test | 4 +-
.../tpcds/queries/unpartitioned-probe.test | 8 +-
.../workloads/tpch/queries/datastream-sender.test | 2 +-
tests/common/test_result_verifier.py | 4 +-
52 files changed, 453 insertions(+), 483 deletions(-)
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/acid-profile.test
b/testdata/workloads/functional-query/queries/QueryTest/acid-profile.test
index 598a02b3c..7cfc2d62e 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/acid-profile.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/acid-profile.test
@@ -14,5 +14,5 @@ select * from tbl_ld
1
---- RUNTIME_PROFILE
# Verify that ValidWriteIdLists is in the profile
-row_regex: .*Loaded ValidWriteIdLists for transactional tables:.*
+row_regex: Loaded ValidWriteIdLists for transactional tables:
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
index 19391d597..126a405d2 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/admission-max-min-mem-limits.test
@@ -10,8 +10,8 @@ set num_scanner_threads=2;
select * from (select * from functional_parquet.alltypes limit 10) A,
(select * from functional_parquet.alltypes limit 10) B;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=68MB.*
-row_regex: .*Cluster Memory Admitted: 68.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=68MB
+row_regex: Cluster Memory Admitted: 68.00 MB
====
---- QUERY
# No mem_limit set
@@ -19,8 +19,8 @@ row_regex: .*Cluster Memory Admitted: 68.00 MB.*
set request_pool=poolLowMinLimit;
select * from functional_parquet.alltypes limit 1;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=16MB.*
-row_regex: .*Cluster Memory Admitted: 32.09 MB.*
+row_regex: Per-Host Resource Estimates: Memory=16MB
+row_regex: Cluster Memory Admitted: 32.09 MB
====
---- QUERY
# No mem_limit set
@@ -47,8 +47,8 @@ set num_scanner_threads=2;
select * from functional_parquet.alltypes A, functional_parquet.alltypes B
where
A.int_col = B.int_col limit 1;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=68MB.*
-row_regex: .*Cluster Memory Admitted: 67.88 MB.*
+row_regex: Per-Host Resource Estimates: Memory=68MB
+row_regex: Cluster Memory Admitted: 67.88 MB
====
---- QUERY
# No mem_limit set
@@ -61,8 +61,8 @@ set DISABLE_HDFS_NUM_ROWS_ESTIMATE=1;
select * from functional_parquet.alltypes A, functional_parquet.alltypes B
where
A.int_col = B.int_col limit 1;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=2.06GB.*
-row_regex: .*Cluster Memory Admitted: 1.50 GB.*
+row_regex: Per-Host Resource Estimates: Memory=2.06GB
+row_regex: Cluster Memory Admitted: 1.50 GB
====
---- QUERY
# No mem_limit set
@@ -70,8 +70,8 @@ row_regex: .*Cluster Memory Admitted: 1.50 GB.*
set request_pool=regularPool;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=10MB.*
-row_regex: .*Cluster Memory Admitted: 50.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=10MB
+row_regex: Cluster Memory Admitted: 50.00 MB
====
---- QUERY
############################
@@ -81,7 +81,7 @@ set request_pool=regularPool;
set mem_limit=200mb;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Cluster Memory Admitted: 200.00 MB.*
+row_regex: Cluster Memory Admitted: 200.00 MB
====
---- QUERY
# mem_limit is set
@@ -104,7 +104,7 @@ set request_pool=regularPool;
set mem_limit=2G;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Cluster Memory Admitted: 1.50 GB.*
+row_regex: Cluster Memory Admitted: 1.50 GB
====
---- QUERY
# mem_limit is set and pool.clamp_mem_limit_query_option is true
@@ -113,7 +113,7 @@ set request_pool=regularPool;
set mem_limit=40mb;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Cluster Memory Admitted: 50.00 MB.*
+row_regex: Cluster Memory Admitted: 50.00 MB
====
---- QUERY
# mem_limit is set and pool.clamp_mem_limit_query_option is false
@@ -122,7 +122,7 @@ set request_pool=regularPoolWithoutClamping;
set mem_limit=2G;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Cluster Memory Admitted: 2.00 GB.*
+row_regex: Cluster Memory Admitted: 2.00 GB
====
---- QUERY
# mem_limit is set and pool.clamp_mem_limit_query_option is false
@@ -131,7 +131,7 @@ set request_pool=regularPoolWithoutClamping;
set mem_limit=50mb;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Cluster Memory Admitted: 50.00 MB.*
+row_regex: Cluster Memory Admitted: 50.00 MB
====
---- QUERY
############################
@@ -142,8 +142,8 @@ row_regex: .*Cluster Memory Admitted: 50.00 MB.*
set request_pool=poolNoMemLimits;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=10MB.*
-row_regex: .*Cluster Memory Admitted: 10.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=10MB
+row_regex: Cluster Memory Admitted: 10.00 MB
====
---- QUERY
############################
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
index 651ce6c46..a61fbbc1a 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/admission-reject-mem-estimate.test
@@ -19,10 +19,10 @@ from tpch_parquet.customer
---- TYPES
STRING,STRING
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=31457280.*
+row_regex: Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=31457280
# Memory estimate sent to backend is overridden, but the explain plan shows
the planner output.
-row_regex: .*Estimated Per-Host Mem: 31457280.*
-row_regex: .*Per-Host Resource Estimates: Memory=83MB.*
+row_regex: Estimated Per-Host Mem: 31457280
+row_regex: Per-Host Resource Estimates: Memory=83MB
====
---- QUERY
# If the estimate is set to a higher value that is still > the max mem
resources for
@@ -58,10 +58,10 @@ from tpch_parquet.customer
---- TYPES
STRING,STRING
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by
configuration\).*MEM_LIMIT=41943040.*MAX_MEM_ESTIMATE_FOR_ADMISSION=47185920.*
+row_regex: Query Options \(set by
configuration\).*MEM_LIMIT=41943040.*MAX_MEM_ESTIMATE_FOR_ADMISSION=47185920
# Memory estimate sent to backend is overridden, but the explain plan shows
the planner output.
-row_regex: .*Estimated Per-Host Mem: 47185920.*
-row_regex: .*Per-Host Resource Estimates: Memory=83MB.*
+row_regex: Estimated Per-Host Mem: 47185920
+row_regex: Per-Host Resource Estimates: Memory=83MB
====
---- QUERY
# Larger queries that use more memory than the estimate can still run because
no mem_limit is set.
@@ -72,10 +72,10 @@ select min(l_comment) from tpch_parquet.lineitem
---- TYPES
STRING
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=10485760.*
+row_regex: Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=10485760
# Memory estimate sent to backend is overridden, but the explain plan shows
the planner output.
-row_regex: .*Estimated Per-Host Mem: 10485760.*
-row_regex: .*Per-Host Resource Estimates: Memory=80MB.*
+row_regex: Estimated Per-Host Mem: 10485760
+row_regex: Per-Host Resource Estimates: Memory=80MB
====
---- QUERY
# If the memory estimate is less than max_mem_estimate_for_admission, then the
estimate
@@ -88,7 +88,7 @@ select 'foo'
---- TYPES
STRING
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=52428800.*
-row_regex: .*Estimated Per-Host Mem: 10485760.*
-row_regex: .*Per-Host Resource Estimates: Memory=10MB.*
+row_regex: Query Options \(set by
configuration\).*MAX_MEM_ESTIMATE_FOR_ADMISSION=52428800
+row_regex: Estimated Per-Host Mem: 10485760
+row_regex: Per-Host Resource Estimates: Memory=10MB
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/all_runtime_filters.test
b/testdata/workloads/functional-query/queries/QueryTest/all_runtime_filters.test
index da073acc6..ff023556c 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/all_runtime_filters.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/all_runtime_filters.test
@@ -10,7 +10,7 @@ where a.bool_col = (b.bool_col && !b.bool_col)
29200
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 3650
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -20,7 +20,7 @@ where a.tinyint_col = b.tinyint_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -30,7 +30,7 @@ where a.smallint_col = b.smallint_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -40,7 +40,7 @@ where a.int_col = b.int_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -50,7 +50,7 @@ where a.bigint_col = b.bigint_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -60,7 +60,7 @@ where a.float_col = b.float_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -70,7 +70,7 @@ where a.double_col = b.double_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -80,7 +80,7 @@ where a.string_col = b.string_col
5840
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
# IMPALA-9691: Support Kudu Timestamp and Date Bloom Filter
@@ -91,7 +91,7 @@ where a.timestamp_col = b.timestamp_col
8
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 8
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -102,7 +102,7 @@ where a.d5_0 = b.d5_0
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -113,7 +113,7 @@ where a.d5_1 = b.d5_1
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -124,7 +124,7 @@ where a.d5_3 = b.d5_3
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -135,7 +135,7 @@ where a.d5_5 = b.d5_5 and b.d5_5 != 0
37
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 37
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -146,7 +146,7 @@ where a.d5_5 = b.d5_5 and b.d5_5 = 0
180
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 180
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -157,7 +157,7 @@ where a.d9_0 = b.d9_0
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -168,7 +168,7 @@ where a.d9_1 = b.d9_1
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -179,7 +179,7 @@ where a.d9_5 = b.d9_5
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -190,7 +190,7 @@ where a.d9_9 = b.d9_9 and b.d9_9 != 0
37
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 37
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -201,7 +201,7 @@ where a.d9_9 = b.d9_9 and b.d9_9 = 0
306
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 306
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -212,7 +212,7 @@ where a.d14_0 = b.d14_0
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -223,7 +223,7 @@ where a.d14_1 = b.d14_1
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -234,7 +234,7 @@ where a.d14_7 = b.d14_7
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -245,7 +245,7 @@ where a.d14_14 = b.d14_14 and b.d14_14 != 0
37
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 37
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -256,7 +256,7 @@ where a.d14_14 = b.d14_14 and b.d14_14 = 0
441
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 441
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -267,7 +267,7 @@ where a.d28_0 = b.d28_0
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -278,7 +278,7 @@ where a.d28_1 = b.d28_1
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -289,7 +289,7 @@ where a.d28_14 = b.d28_14
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -300,7 +300,7 @@ where a.d28_28 = b.d28_28 and b.d28_28 != 0
37
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 37
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -311,7 +311,7 @@ where a.d28_28 = b.d28_28 and b.d28_28 = 0
686
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 686
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -322,7 +322,7 @@ where a.d38_0 = b.d38_0
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -333,7 +333,7 @@ where a.d38_1 = b.d38_1
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -344,7 +344,7 @@ where a.d38_19 = b.d38_19
38
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 38
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -355,7 +355,7 @@ where a.d38_38 = b.d38_38 and b.d38_38 != 0
37
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 37
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -366,7 +366,7 @@ where a.d38_38 = b.d38_38 and b.d38_38 = 0
732
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 732
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -377,7 +377,7 @@ where a.date_col = b.date_col and b.date_col = DATE
'2017-11-28'
9
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 3
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
@@ -402,7 +402,7 @@ where a.id = b.tinyint_col * 2;
INT,TINYINT,INT,TINYINT
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 2
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
select STRAIGHT_JOIN count(*) from tpch_kudu.orders a
@@ -412,7 +412,7 @@ where a.o_orderkey = b.o_orderkey and b.o_orderkey = 100000;
1
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
select STRAIGHT_JOIN count(*) from tpch_kudu.orders a
@@ -422,7 +422,7 @@ where a.o_orderkey = b.o_orderkey and b.o_orderkey !=
100000;
1499999
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1499999
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
select STRAIGHT_JOIN count(*) from tpch_kudu.orders a
@@ -432,7 +432,7 @@ where a.o_orderkey = b.o_orderkey and b.o_orderkey = 100009;
0
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 0
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
@@ -450,8 +450,8 @@ where a.tinyint_col = b.int_col and b.int_col in (0, 1)
1065800
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 1460
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*BloomFilterBytes: 0.*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: BloomFilterBytes: 0
====
@@ -467,7 +467,7 @@ where a.id in (select b.id from alltypes b where b.int_col
< 10);
7300
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 7300
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
@@ -485,7 +485,7 @@ where a.int_col = b.int_col
5329000
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 7300
-row_regex: .*1 of 1 Runtime Filter Published.*
+row_regex: 1 of 1 Runtime Filter Published
====
---- QUERY
# Join on the integer type of columns with Kudu as target
@@ -498,7 +498,7 @@ where a.int_col = b.int_col
5329000
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 7300
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
# Join on the timestamp type of columns with kudu as target
@@ -512,7 +512,7 @@ where a.timestamp_col = b.timestamp_col
7300
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 7300
-row_regex: .*2 of 2 Runtime Filters Published.*
+row_regex: 2 of 2 Runtime Filters Published
====
---- QUERY
# Join on the timestamp type of columns with HDFS as target
@@ -525,5 +525,5 @@ where a.timestamp_col = b.timestamp_col
7300
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 7300
-row_regex: .*1 of 1 Runtime Filter Published.*
+row_regex: 1 of 1 Runtime Filter Published
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
index ebdd2b3e5..e02034111 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/alter-table.test
@@ -1257,7 +1257,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted select i, d, f, b from insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: i ASC NULLS LAST, d ASC NULLS LAST
+row_regex: order by: i ASC NULLS LAST, d ASC NULLS LAST
====
---- QUERY
# Test selection after alter table
@@ -1278,7 +1278,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted select i, d, f, b from insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: b ASC NULLS LAST, d ASC NULLS LAST, f ASC NULLS LAST
+row_regex: order by: b ASC NULLS LAST, d ASC NULLS LAST, f ASC NULLS LAST
====
---- QUERY
# Test selection after alter table
@@ -1299,7 +1299,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted select i, d, f, b from insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: b ASC NULLS LAST, d ASC NULLS LAST, f ASC NULLS LAST
+row_regex: order by: b ASC NULLS LAST, d ASC NULLS LAST, f ASC NULLS LAST
====
---- QUERY
# Test selection after alter table
@@ -1320,7 +1320,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted select i, cast(d as decimal(12,2)), b from
insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: CAST\(d AS DECIMAL\(12,2\)\) ASC NULLS LAST, b ASC
NULLS LAST
+row_regex: order by: CAST\(d AS DECIMAL\(12,2\)\) ASC NULLS LAST, b ASC NULLS
LAST
====
---- QUERY
# Test selection after alter table
@@ -1355,7 +1355,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted select i, cast(d as decimal(12,2)) from
insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: CAST\(d AS DECIMAL\(12,2\)\) ASC NULLS LAST
+row_regex: order by: CAST\(d AS DECIMAL\(12,2\)\) ASC NULLS LAST
====
---- QUERY
# Test selection after alter table
@@ -1421,7 +1421,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_sorted_partitioned partition (p=1) select i, d, f, b
from insert_data;
---- RUNTIME_PROFILE
-row_regex: .*order by: i ASC NULLS LAST
+row_regex: order by: i ASC NULLS LAST
====
---- QUERY
# Test selection after alter table
@@ -1563,7 +1563,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_zsorted select i, d, f, b from insert_data_z;
---- RUNTIME_PROFILE
-row_regex: .*order by: ZORDER: i, d
+row_regex: order by: ZORDER: i, d
====
---- QUERY
# Test selection after alter table
@@ -1585,7 +1585,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_zsorted select i, d, f, b from insert_data_z;
---- RUNTIME_PROFILE
-row_regex: .*order by: ZORDER: b, d, f
+row_regex: order by: ZORDER: b, d, f
====
---- QUERY
# Test selection after alter table
@@ -1607,7 +1607,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_zsorted select i, d, f, b from insert_data_z;
---- RUNTIME_PROFILE
-row_regex: .*order by: ZORDER: b, d, f
+row_regex: order by: ZORDER: b, d, f
====
---- QUERY
# Test selection after alter table
@@ -1629,7 +1629,7 @@ STRING,STRING,STRING
# Test inserting after alter table
insert into table insert_zsorted select i, cast(d as decimal(12,2)), b from
insert_data_z;
---- RUNTIME_PROFILE
-row_regex: .*order by: ZORDER: CAST\(d AS DECIMAL\(12,2\)\), b
+row_regex: order by: ZORDER: CAST\(d AS DECIMAL\(12,2\)\), b
====
---- QUERY
# Test selection after alter table
@@ -1720,7 +1720,7 @@ STRING,STRING,STRING
---- QUERY
insert into table insert_zsorted_partitioned partition (p=1) select i, d, f, b
from insert_data_z;
---- RUNTIME_PROFILE
-row_regex: .*order by: ZORDER: d, i
+row_regex: order by: ZORDER: d, i
====
---- QUERY
# Test selection after alter table
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/bloom_filters.test
b/testdata/workloads/functional-query/queries/QueryTest/bloom_filters.test
index d9a57bef6..f6744b757 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/bloom_filters.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/bloom_filters.test
@@ -18,8 +18,8 @@ select count(*) from tpch.orders join tpch.customer on
o_comment = c_mktsegment;
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(8.00 KB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(8.00 KB\)
====
---- QUERY
SET RUNTIME_FILTER_MODE=GLOBAL;
@@ -31,8 +31,8 @@ select STRAIGHT_JOIN count(*) from (select * from
tpch.lineitem a LIMIT 1) a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(128.00 KB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(128.00 KB\)
====
---- QUERY
SET RUNTIME_FILTER_MODE=GLOBAL;
@@ -44,8 +44,8 @@ select STRAIGHT_JOIN count(*) from (select * from
tpch.lineitem a LIMIT 1) a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(512.00 KB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(512.00 KB\)
====
---- QUERY
SET RUNTIME_FILTER_MODE=GLOBAL;
@@ -57,8 +57,8 @@ select STRAIGHT_JOIN count(*) from (select * from
tpch.lineitem a LIMIT 1) a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(1.00 MB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(1.00 MB\)
====
---- QUERY
SET RUNTIME_FILTER_MODE=GLOBAL;
@@ -71,8 +71,8 @@ select STRAIGHT_JOIN count(*) from (select * from
tpch.lineitem a LIMIT 1) a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(2.00 MB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(2.00 MB\)
====
@@ -90,8 +90,8 @@ select STRAIGHT_JOIN count(*) from alltypes a join [SHUFFLE]
alltypes b on a.id
---- RESULTS
7300
---- RUNTIME_PROFILE
-row_regex: .*1|2 of 1|2 Runtime Filter Published.*
-row_regex: .*Filter 0 \(128.00 KB\).*
+row_regex: 1|2 of 1|2 Runtime Filter Published
+row_regex: Filter 0 \(128.00 KB\)
====
---- QUERY
# Check that filter sizes are rounded up to power-of-two
@@ -101,8 +101,8 @@ select STRAIGHT_JOIN count(*) from alltypes a join
[SHUFFLE] alltypes b on a.id
---- RESULTS
7300
---- RUNTIME_PROFILE
-row_regex: .*1|2 of 1|2 Runtime Filter Published.*
-row_regex: .*Filter 0 \(128.00 KB\).*
+row_regex: 1|2 of 1|2 Runtime Filter Published
+row_regex: Filter 0 \(128.00 KB\)
====
---- QUERY
SET RUNTIME_FILTER_MODE=GLOBAL;
@@ -113,7 +113,7 @@ with l as (select * from tpch.lineitem UNION ALL select *
from tpch.lineitem)
select STRAIGHT_JOIN count(*) from (select * from tpch.lineitem a LIMIT 1) a
join (select * from l LIMIT 1000000) b on a.l_orderkey = -b.l_orderkey;
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 \(64.00 KB\).*
+row_regex: Filter 0 \(64.00 KB\)
====
@@ -134,8 +134,8 @@ select STRAIGHT_JOIN count(*) from alltypes a join
[SHUFFLE] alltypes b on a.id
---- RESULTS
7300
---- RUNTIME_PROFILE
-row_regex: .*1|2 of 1|2 Runtime Filter Published.*
-row_regex: .*Filter 0 \(8.00 KB\).*
+row_regex: 1|2 of 1|2 Runtime Filter Published
+row_regex: Filter 0 \(8.00 KB\)
====
---- QUERY
####################################################
@@ -153,6 +153,6 @@ select STRAIGHT_JOIN count(*) from (select * from
tpch.lineitem a LIMIT 1) a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
-row_regex: .*Filter 0 \(256.00 KB\).*
+row_regex: 1 of 1 Runtime Filter Published
+row_regex: Filter 0 \(256.00 KB\)
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/calcite.test
b/testdata/workloads/functional-query/queries/QueryTest/calcite.test
index ab3f04be0..e90c9b1a7 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/calcite.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/calcite.test
@@ -5,12 +5,12 @@
select * from functional.alltypestiny;
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
create table calcite_alltypes as select * from functional.alltypes order by id
limit 5;
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: OriginalPlanner.*
+row_regex: PlannerType: OriginalPlanner
====
---- QUERY
select * from calcite_alltypes;
@@ -23,12 +23,12 @@ select * from calcite_alltypes;
---- TYPES
int,boolean,tinyint,smallint,int,bigint,float,double,string,string,timestamp,int,int
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select string_col, tinyint_col from calcite_alltypes;
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
---- RESULTS
'0',0
'1',1
@@ -49,7 +49,7 @@ select d1,d2,d3,d4,d5,d6 from functional.decimal_tbl;
---- TYPES
decimal,decimal,decimal,decimal,decimal,decimal
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select * from functional.chars_tiny;
@@ -69,7 +69,7 @@ char,char,string
---- HS2_TYPES
char,char,varchar
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select * from functional.date_tbl;
@@ -99,7 +99,7 @@ select * from functional.date_tbl;
---- TYPES
int,date,date
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# creating a new table. We cannot use any functions at this point to
@@ -115,7 +115,7 @@ select * from ascii_binary;
---- TYPES
int,string,binary
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Tiny test for Calcite. At the point of this commit, very few functions work.
This
@@ -135,7 +135,7 @@ select bigint_col, abs(cast(-3 as bigint)),
abs(-3000000000) from functional.all
---- TYPES
bigint,bigint,bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Tiny test for Calcite. At the point of this commit, very few functions work.
This
@@ -155,7 +155,7 @@ select cast(cast('2005-12-13 08:00:00' as string) AS
TIMESTAMP) from functional
---- TYPES
timestamp
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select * from calcite_alltypes where bigint_col = 20;
@@ -164,7 +164,7 @@ select * from calcite_alltypes where bigint_col = 20;
---- TYPES
int,boolean,tinyint,smallint,int,bigint,float,double,string,string,timestamp,int,int
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select tinyint_col from calcite_alltypes where bigint_col = 20;
@@ -173,7 +173,7 @@ select tinyint_col from calcite_alltypes where bigint_col =
20;
---- TYPES
tinyint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Values test
@@ -183,7 +183,7 @@ select abs(cast(-8 as bigint));
---- TYPES
bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select 'hello'
@@ -192,7 +192,7 @@ select 'hello'
---- TYPES
string
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Union test
@@ -203,7 +203,7 @@ select 3 union select 4;
---- TYPES
tinyint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select * from (values (1)) union (values (2), (3));
@@ -225,7 +225,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id > 3 order by ab
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -239,7 +239,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id >= 3 order by a
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -251,7 +251,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id < 3 order by ab
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -264,7 +264,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id <= 3 order by a
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -280,7 +280,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id != 3 order by a
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -296,7 +296,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id != 3 order by a
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -310,7 +310,7 @@ select group_str, some_nulls from functional.nullrows where
group_str = 'a' orde
---- TYPES
string, string
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sort test
@@ -324,7 +324,7 @@ select group_str, some_nulls from functional.nullrows where
group_str = 'a' orde
---- TYPES
string, string
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# limit test
@@ -345,7 +345,7 @@ select id, abs(bigint_col) from functional.alltypestiny
where id > 2 order by ab
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# aggregation test
@@ -355,7 +355,7 @@ select sum(bigint_col) from functional.alltypestiny;
---- TYPES
bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# aggregation test
@@ -372,7 +372,7 @@ select id, sum(bigint_col) from functional.alltypestiny
group by id order by id;
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# aggregation test
@@ -386,7 +386,7 @@ having sum(bigint_col) > cast(5 as bigint) order by id;
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# aggregation test
@@ -416,7 +416,7 @@ NULL,10,40
---- TYPES
int, bigint, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# having test
@@ -429,7 +429,7 @@ select id, sum(bigint_col) from functional.alltypestiny
group by id having sum(b
---- TYPES
int, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# join test
@@ -449,7 +449,7 @@ on (a.id = b.id) order by a.id;
---- TYPES
int, bigint, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# join inequality test
@@ -470,7 +470,7 @@ order by a.id;
---- TYPES
int, int, bigint, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# cross join test
@@ -498,7 +498,7 @@ order by a.id, b.id;
---- TYPES
int, int, bigint, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# left outer join test
@@ -532,7 +532,7 @@ order by a.id
---- TYPES
int, bigint, bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select bigint_col + bigint_col, int_col + int_col, smallint_col + smallint_col,
@@ -550,7 +550,7 @@ from functional.alltypestiny;
---- TYPES
bigint, bigint, int, smallint, int
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select float_col + int_col, float_col + 3.0, 3.0 + 3.0
@@ -567,7 +567,7 @@ from functional.alltypestiny;
---- TYPES
double, decimal, decimal
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select bigint_col - bigint_col, int_col - int_col, smallint_col - smallint_col,
@@ -711,7 +711,7 @@ select 3 union select 458;
---- TYPES
smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# case test
@@ -728,7 +728,7 @@ select tinyint_col, case tinyint_col when 1 then 5 else 458
end from functional.
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# case test
@@ -745,7 +745,7 @@ select tinyint_col, case tinyint_col when 1 then 5 when 2
then 7 else 458 end fr
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# case test
@@ -762,7 +762,7 @@ select tinyint_col, case tinyint_col when 0 then 458 else 5
end from functional.
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# case test
@@ -779,7 +779,7 @@ select tinyint_col, case tinyint_col when 0 then 458 end
from functional.alltype
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# case test other format (the calcite rexnode should be the same)
@@ -796,7 +796,7 @@ select tinyint_col, case when tinyint_col=00 then 458 else
5 end from functional
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# or test
@@ -813,7 +813,7 @@ true
---- TYPES
boolean
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# and test
@@ -830,7 +830,7 @@ true
---- TYPES
boolean
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# sum cast tinyint agg test
@@ -840,7 +840,7 @@ select sum(tinyint_col) from functional.alltypestiny;
---- TYPES
bigint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# duplicate test from analytics-fn.test, delete when it is activated.
@@ -876,7 +876,7 @@ order by date_part;
---- TYPES
DATE, BIGINT, DATE, DATE
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Test ROWS windows with start boundaries
@@ -923,7 +923,7 @@ select "1" as "hello";
---- TYPES
string
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
explain select * from functional.alltypestiny;
@@ -931,7 +931,7 @@ explain select * from functional.alltypestiny;
row_regex:.*01:EXCHANGE.*
row_regex:.*00:SCAN.*
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
set explain_level=3;
@@ -941,7 +941,7 @@ row_regex:.*01:EXCHANGE.*
row_regex:.*00:SCAN.*
row_regex:.*partitions=4/4.*
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select count(*) from functional.alltypes_view;
@@ -1107,7 +1107,7 @@ select 2, 1 + 1;
---- TYPES
tinyint,smallint
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# Labels test
@@ -1119,7 +1119,7 @@ length('hello')
---- TYPES
int
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
# IMPALA-14561: Should not be using MySqlDialect to capture labels
@@ -1127,7 +1127,7 @@ select timestamp_col + interval 3 milliseconds from
functional.alltypestiny;
---- LABELS
expr$0
---- RUNTIME_PROFILE
-row_regex: .*PlannerType: CalcitePlanner.*
+row_regex: PlannerType: CalcitePlanner
====
---- QUERY
select cast('nan' as double), cast('inf' as float);
@@ -1139,12 +1139,12 @@ DOUBLE, FLOAT
---- QUERY
select count(*) from functional.alltypestiny;
---- RUNTIME_PROFILE
-row_regex: .*SPOOL_QUERY_RESULTS=0.*
+row_regex: SPOOL_QUERY_RESULTS=0
====
---- QUERY
select * from (values(0));
---- RUNTIME_PROFILE
-row_regex: .*SPOOL_QUERY_RESULTS=0.*
+row_regex: SPOOL_QUERY_RESULTS=0
====
---- QUERY
# IMPALA-14421: Repeat of test in chars.test since Calcite
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/codegen-mem-limit.test
b/testdata/workloads/functional-query/queries/QueryTest/codegen-mem-limit.test
index 09d0912d8..2b5b16ae6 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/codegen-mem-limit.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/codegen-mem-limit.test
@@ -8,5 +8,5 @@ with t as (values(1), (2), (3), (4)) select min(t1.`1` +
t2.`1`) from t t1 join
---- CATCH
Codegen failed to reserve
---- RUNTIME_PROFILE
-row_regex: .*EXEC_SINGLE_NODE_ROWS_THRESHOLD=0.*
+row_regex: EXEC_SINGLE_NODE_ROWS_THRESHOLD=0
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/data-cache.test
b/testdata/workloads/functional-query/queries/QueryTest/data-cache.test
index 2c5b0782d..e4f0390b5 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/data-cache.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/data-cache.test
@@ -3,9 +3,9 @@
create table test_parquet stored as parquet as select * from
tpch_parquet.lineitem;
---- RUNTIME_PROFILE
# Expect all cache misses for tpch_parquet.lineitem.
-row_regex: .*DataCacheHitBytes: 0.*
-row_regex: .*DataCacheHitCount: 0 \(0\).*
-row_regex: .*DataCacheMissCount: 64 \(64\).*
+row_regex: DataCacheHitBytes: 0
+row_regex: DataCacheHitCount: 0 \(0\)
+row_regex: DataCacheMissCount: 64 \(64\)
====
---- QUERY
select count(*) from tpch_parquet.lineitem t1, test_parquet t2 where
t1.l_orderkey = t2.l_orderkey;
@@ -13,14 +13,14 @@ select count(*) from tpch_parquet.lineitem t1, test_parquet
t2 where t1.l_orderk
30012985
---- RUNTIME_PROFILE
# Expect cache hits for t1 and cache misses for t2.
-row_regex: .*DataCacheHitCount: 6 \(6\).*
-row_regex: .*DataCacheMissBytes: 0.*
-row_regex: .*DataCacheMissCount: 0 \(0\).*
-row_regex: .*DataCachePartialHitCount: 0 \(0\).*
-row_regex: .*DataCacheHitBytes: 0.*
-row_regex: .*DataCacheHitCount: 0 \(0\).*
-row_regex: .*DataCacheMissCount: 3 \(3\).*
-row_regex: .*DataCachePartialHitCount: 0 \(0\).*
+row_regex: DataCacheHitCount: 6 \(6\)
+row_regex: DataCacheMissBytes: 0
+row_regex: DataCacheMissCount: 0 \(0\)
+row_regex: DataCachePartialHitCount: 0 \(0\)
+row_regex: DataCacheHitBytes: 0
+row_regex: DataCacheHitCount: 0 \(0\)
+row_regex: DataCacheMissCount: 3 \(3\)
+row_regex: DataCachePartialHitCount: 0 \(0\)
====
---- QUERY
select count(distinct l_orderkey) from test_parquet;
@@ -28,10 +28,10 @@ select count(distinct l_orderkey) from test_parquet;
1500000
---- RUNTIME_PROFILE
# Expect all cache hits.
-row_regex: .*DataCacheHitCount: 3 \(3\).*
-row_regex: .*DataCacheMissBytes: 0.*
-row_regex: .*DataCacheMissCount: 0 \(0\).*
-row_regex: .*DataCachePartialHitCount: 0 \(0\).*
+row_regex: DataCacheHitCount: 3 \(3\)
+row_regex: DataCacheMissBytes: 0
+row_regex: DataCacheMissCount: 0 \(0\)
+row_regex: DataCachePartialHitCount: 0 \(0\)
====
---- QUERY
# Overwrite temp table with subset of data.
@@ -44,10 +44,10 @@ select count(distinct l_orderkey) from test_parquet;
652393
---- RUNTIME_PROFILE
# Expect all cache misses due to change in mtime.
-row_regex: .*DataCacheHitBytes: 0.*
-row_regex: .*DataCacheHitCount: 0 \(0\).*
-row_regex: .*DataCacheMissCount: 2 \(2\).*
-row_regex: .*DataCachePartialHitCount: 0 \(0\).*
+row_regex: DataCacheHitBytes: 0
+row_regex: DataCacheHitCount: 0 \(0\)
+row_regex: DataCacheMissCount: 2 \(2\)
+row_regex: DataCachePartialHitCount: 0 \(0\)
====
---- QUERY
# Exercise HDFS cache
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/datastream-sender-codegen.test
b/testdata/workloads/functional-query/queries/QueryTest/datastream-sender-codegen.test
index a7e92020d..3e785f512 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/datastream-sender-codegen.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/datastream-sender-codegen.test
@@ -11,7 +11,7 @@ select count(*) from alltypes t1
bigint
---- RUNTIME_PROFILE
# Verify that codegen was enabled
-row_regex: .*Hash Partitioned Sender Codegen Enabled.*
+row_regex: Hash Partitioned Sender Codegen Enabled
====
---- QUERY
set disable_codegen_rows_threshold=0;
@@ -25,7 +25,7 @@ select count(*) from alltypes t1
bigint
---- RUNTIME_PROFILE
# Verify that codegen was enabled
-row_regex: .*Unpartitioned Sender Codegen Disabled: not needed.*
+row_regex: Unpartitioned Sender Codegen Disabled: not needed
====
---- QUERY
set disable_codegen_rows_threshold=0;
@@ -38,5 +38,5 @@ bigint
---- RUNTIME_PROFILE
# Verify that CHAR codegen was enabled for hash partitioning even though CHAR
# codegen isn't supported everywhere.
-row_regex: .*Hash Partitioned Sender Codegen Enabled.*
+row_regex: Hash Partitioned Sender Codegen Enabled
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/dedicated-coord-mem-estimates.test
b/testdata/workloads/functional-query/queries/QueryTest/dedicated-coord-mem-estimates.test
index 29731ae91..6bdc07137 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/dedicated-coord-mem-estimates.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/dedicated-coord-mem-estimates.test
@@ -3,9 +3,9 @@
# CTAS
create table test as select id from functional.alltypes where id > 1
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=16MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=100MB.*
-row_regex: .*Cluster Memory Admitted: 132.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=16MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=100MB
+row_regex: Cluster Memory Admitted: 132.00 MB
====
---- QUERY
# Truncate table to run the following inserts.
@@ -15,41 +15,41 @@ truncate table test
# Small insert(i.e. values list, runs on coordinator only).
insert into test values (1), (2), (3)
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=10MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=100MB.*
-row_regex: .*Cluster Memory Admitted: 10.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=10MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=100MB
+row_regex: Cluster Memory Admitted: 10.00 MB
====
---- QUERY
# Large insert where it doesn't run on the coordinator.
insert into test select id from functional.alltypes where id > 3
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=16MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=100MB.*
-row_regex: .*Cluster Memory Admitted: 132.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=16MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=100MB
+row_regex: Cluster Memory Admitted: 132.00 MB
====
---- QUERY
# SELECT with merging exchange (i.e. order by).
select * from functional.alltypes order by int_col;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=32MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=104MB.*
-row_regex: .*Cluster Memory Admitted: 169.47 MB.*
+row_regex: Per-Host Resource Estimates: Memory=32MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=104MB
+row_regex: Cluster Memory Admitted: 169.47 MB
====
---- QUERY
# SELECT with non-merging exchange.
select * from functional.alltypes;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=21MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=104MB.*
-row_regex: .*Cluster Memory Admitted: 146.20 MB.*
+row_regex: Per-Host Resource Estimates: Memory=21MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=104MB
+row_regex: Cluster Memory Admitted: 146.20 MB
====
---- QUERY
# SELECT with a non-grouping aggregate in the coordinator fragment.
select avg(int_col) from functional.alltypes;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=16MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=100MB.*
-row_regex: .*Cluster Memory Admitted: 132.12 MB.*
+row_regex: Per-Host Resource Estimates: Memory=16MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=100MB
+row_regex: Cluster Memory Admitted: 132.12 MB
====
---- QUERY
# SELECT with num_nodes=1 and a complex plan in the coordinator.
@@ -76,9 +76,9 @@ order by
l_returnflag,
l_linestatus
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=98MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=198MB.*
-row_regex: .*Cluster Memory Admitted: 198.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=98MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=198MB
+row_regex: Cluster Memory Admitted: 198.00 MB
====
---- QUERY
# SELECT with multiple unpartitioned analytic functions to force the sort and
analytics
@@ -90,7 +90,7 @@ avg(smallint_col) over (order by int_col),
max(int_col) over (order by smallint_col rows between unbounded preceding and
1 following)
from functional.alltypes;
---- RUNTIME_PROFILE
-row_regex: .*Per-Host Resource Estimates: Memory=46MB.*
-row_regex: .*Dedicated Coordinator Resource Estimate: Memory=124MB.*
-row_regex: .*Cluster Memory Admitted: 216.00 MB.*
+row_regex: Per-Host Resource Estimates: Memory=46MB
+row_regex: Dedicated Coordinator Resource Estimate: Memory=124MB
+row_regex: Cluster Memory Admitted: 216.00 MB
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/disable-codegen.test
b/testdata/workloads/functional-query/queries/QueryTest/disable-codegen.test
index bb9419537..59d33247c 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/disable-codegen.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/disable-codegen.test
@@ -11,8 +11,8 @@ select count(*) from alltypes t1
bigint
---- RUNTIME_PROFILE
# Verify that codegen was enabled for join and scan
-row_regex: .*Build Side Codegen Enabled.*
-row_regex: .*TEXT Codegen Enabled.*
+row_regex: Build Side Codegen Enabled
+row_regex: TEXT Codegen Enabled
====
---- QUERY
# alltypes has 7300 rows - codegen should be disabled regardless
@@ -26,7 +26,7 @@ select count(*) from alltypes t1
bigint
---- RUNTIME_PROFILE
# Verify that codegen was disabled
-row_regex: .*Codegen Disabled: disabled due to optimization hints.*
+row_regex: Codegen Disabled: disabled due to optimization hints
====
---- QUERY
# IMPALA-6435: codegen for NULL CHAR literals was broken. This query crashed
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/hbase-hms-column-order.test
b/testdata/workloads/functional-query/queries/QueryTest/hbase-hms-column-order.test
index 4bf9167c5..eb941a2a4 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/hbase-hms-column-order.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/hbase-hms-column-order.test
@@ -38,7 +38,7 @@ select * from functional_hbase.stringids
---- TYPES
string, boolean, tinyint, smallint, int, bigint, float, double, string,
string, timestamp, int, int, int
---- RUNTIME_PROFILE
-row_regex: .*key predicates: id = '5'.*
-row_regex: .*start key: 5.*
-row_regex: .*stop key: 5\\0.*
+row_regex: key predicates: id = '5'
+row_regex: start key: 5
+row_regex: stop key: 5\\0
====
\ No newline at end of file
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/hdfs-partition-pruning.test
b/testdata/workloads/functional-query/queries/QueryTest/hdfs-partition-pruning.test
index a948b14fa..022c6985d 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/hdfs-partition-pruning.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/hdfs-partition-pruning.test
@@ -14,4 +14,4 @@ where date_stored_as_date in ( '2025-12-12');
---- TYPES
INT,DATE,INT,STRING
---- RUNTIME_PROFILE
-row_regex:.*partitions=1/2 files=1 size=.*
+row_regex: partitions=1/2 files=1 size=
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/hdfs_parquet_scan_node_profile.test
b/testdata/workloads/functional-query/queries/QueryTest/hdfs_parquet_scan_node_profile.test
index 7e55ab1ea..a5720d611 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/hdfs_parquet_scan_node_profile.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/hdfs_parquet_scan_node_profile.test
@@ -2,19 +2,19 @@
# This query will do a full scan on a parquet file
select * from functional_parquet.alltypestiny where year=2009 and month=1
---- RUNTIME_PROFILE
-row_regex: .*File Formats: PARQUET/SNAPPY:1
+row_regex: File Formats: PARQUET/SNAPPY:1
====
---- QUERY
# This query will do a full scan on a parquet table with two partitions.
# Each partition uses different compression types.
select * from alltypes_multi_compression
---- RUNTIME_PROFILE
-row_regex: .*File Formats: PARQUET/GZIP:1 PARQUET/SNAPPY:1
+row_regex: File Formats: PARQUET/GZIP:1 PARQUET/SNAPPY:1
====
---- QUERY
# This query will do a full scan on a parquet table with multiple
# compression types
select * from multi_compression
---- RUNTIME_PROFILE
-row_regex: .*File Formats: PARQUET/\(GZIP,SNAPPY\):2
+row_regex: File Formats: PARQUET/\(GZIP,SNAPPY\):2
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/hdfs_scanner_profile.test
b/testdata/workloads/functional-query/queries/QueryTest/hdfs_scanner_profile.test
index 0fe5fb38f..1f1258d2f 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/hdfs_scanner_profile.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/hdfs_scanner_profile.test
@@ -2,7 +2,7 @@
# read during a scan
select * from alltypesagg
---- RUNTIME_PROFILE
-row_regex: .*RowsRead: 11.00K .
+row_regex: RowsRead: 11.00K .
====
---- QUERY
# This query verifies that a scan range is marked as skipped
@@ -10,7 +10,7 @@ row_regex: .*RowsRead: 11.00K .
# for a scan range
select count(*) from tpcds_parquet.store_sales
---- RUNTIME_PROFILE
-row_regex: .*File Formats: PARQUET/Unknown\(Skipped\):.*
+row_regex: File Formats: PARQUET/Unknown\(Skipped\):
====
---- QUERY
# This query verifies that a when a parquet scan range is runtime
@@ -21,7 +21,7 @@ select count(*) from tpcds_parquet.store_sales
join tpcds_parquet.date_dim on
ss_sold_date_sk = d_date_sk where d_qoy=1
---- RUNTIME_PROFILE
-row_regex: .*File Formats: PARQUET/NONE:.* PARQUET/Unknown\(Skipped\).*
+row_regex: File Formats: PARQUET/NONE:.* PARQUET/Unknown\(Skipped\)
====
---- QUERY
# This query verifies that a when a text scan range is runtime
@@ -30,5 +30,5 @@ set runtime_filter_wait_time_ms=100000;
select count(*) from tpcds.store_sales join tpcds.date_dim on
ss_sold_date_sk = d_date_sk where d_qoy=1
---- RUNTIME_PROFILE
-row_regex: .*File Formats: TEXT/NONE:.* TEXT/NONE\(Skipped\):.*
+row_regex: File Formats: TEXT/NONE:.* TEXT/NONE\(Skipped\):
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-mixed-format-position-deletes.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-mixed-format-position-deletes.test
index eb0bec7fd..3663d15c0 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-mixed-format-position-deletes.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-mixed-format-position-deletes.test
@@ -18,7 +18,7 @@ DELETE FROM ice_mixed_formats WHERE i = 3;
refresh ice_mixed_formats;
refresh ice_mixed_formats;
---- RUNTIME_PROFILE
-row_regex:.*Iceberg table reload skipped as no change detected
+row_regex: Iceberg table reload skipped as no change detected
====
---- QUERY
SHOW FILES IN ice_mixed_formats;
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
index c8ec7a609..1f5d68760 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-negative.test
@@ -151,7 +151,7 @@ TBLPROPERTIES('iceberg.catalog'='hadoop.catalog',
'iceberg.table_identifier'='fake_db.fake_table');
SHOW CREATE TABLE fake_iceberg_table_hadoop_catalog;
---- CATCH
-row_regex:.*CAUSED BY: IcebergTableLoadingException: Table does not exist:
fake_db.fake_table*
+row_regex:.*CAUSED BY: IcebergTableLoadingException: Table does not exist:
fake_db.fake_table
====
---- QUERY
CREATE TABLE iceberg_overwrite_bucket (i int, j int)
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitions.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitions.test
index a515e72e9..4d2de38fb 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitions.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-partitions.test
@@ -25,7 +25,7 @@ INT, STRING, DOUBLE, TIMESTAMP, BOOLEAN
2,'Bob',20.0,2023-01-15 11:00:00,false
3,'Charlie',30.2,2023-01-16 12:00:00,true
---- RUNTIME_PROFILE
-row_regex: .* partitions=1/1 files=1 size=.*B
+row_regex: partitions=1/1 files=1 size=.*B
====
---- QUERY
ALTER TABLE ice_num_partitions SET PARTITION SPEC(year(ts), bucket(4, id));
@@ -39,7 +39,7 @@ INSERT INTO ice_num_partitions VALUES
---- QUERY
SELECT * FROM ice_num_partitions;
---- RUNTIME_PROFILE
-row_regex: .* partitions=4/4 files=4 size=.*B
+row_regex: partitions=4/4 files=4 size=.*B
====
---- QUERY
SELECT id, name FROM ice_num_partitions where ts between '2024-02-20 12:00:00'
and '2024-02-20 18:00:00';
@@ -49,7 +49,7 @@ INT, STRING
6,'Frank'
4,'David'
---- RUNTIME_PROFILE
-row_regex: .* partitions=2/4 files=2 size=.*B
+row_regex: partitions=2/4 files=2 size=.*B
====
---- QUERY
ALTER TABLE ice_num_partitions SET PARTITION SPEC(void(ts), bucket(4, id));
@@ -60,7 +60,7 @@ INSERT INTO ice_num_partitions VALUES
(8, 'Heidi', 80.6, '2025-04-10 17:00:00', TRUE);
SELECT * FROM ice_num_partitions;
---- RUNTIME_PROFILE
-row_regex: .* partitions=5/5 files=5 size=.*B
+row_regex: partitions=5/5 files=5 size=.*B
====
---- QUERY
ALTER TABLE ice_num_partitions DROP PARTITION(bucket(4, id)=1);
@@ -83,7 +83,7 @@ INT, STRING, DOUBLE, TIMESTAMP, BOOLEAN
7,'Grace',70.3,2025-04-05 16:00:00,false
8,'Heidi',80.6,2025-04-10 17:00:00,true
---- RUNTIME_PROFILE
-row_regex: .* partitions=4/4 files=4 size=.*B
+row_regex: partitions=4/4 files=4 size=.*B
====
---- QUERY
show partitions ice_num_partitions;
@@ -103,5 +103,5 @@ INT, STRING
---- RESULTS
4,'David'
---- RUNTIME_PROFILE
-row_regex: .* partitions=4/4 files=4 size=.*B
+row_regex: partitions=4/4 files=4 size=.*B
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-basic.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-basic.test
index 55155cdc5..4ebb441f8 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-basic.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-basic.test
@@ -10,15 +10,15 @@ Iceberg Plan Metrics for Node 00:
select * from functional_parquet.iceberg_partitioned where action='download'
---- RUNTIME_PROFILE
Iceberg Plan Metrics for Node 00:
-row_regex:.*total-planning-duration: .+
+row_regex: total-planning-duration:
result-data-files: 6
result-delete-files: 0
total-data-manifests: 1
total-delete-manifests: 0
scanned-data-manifests: 1
skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-delete-file-size-in-bytes: .+ \(\d+\)
skipped-data-files: 14
skipped-delete-files: 0
scanned-delete-manifests: 0
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-with-deletes.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-with-deletes.test
index aed019c21..8b2c9eedd 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-with-deletes.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-scan-metrics-with-deletes.test
@@ -7,15 +7,15 @@ select * from
functional_parquet.iceberg_v2_positional_not_all_data_files_have_delete_files
FOR SYSTEM_VERSION AS OF NOT_ALL_DATA_FILES_HAVE_DELETE_FILES_SNAPSHOT_ID
---- RUNTIME_PROFILE
Iceberg Plan Metrics for Node 00:
-row_regex:.*total-planning-duration: .+
+row_regex: total-planning-duration:
result-data-files: 1
result-delete-files: 0
total-data-manifests: 1
total-delete-manifests: 0
scanned-data-manifests: 1
skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-delete-file-size-in-bytes: .+ \(\d+\)
skipped-data-files: 0
skipped-delete-files: 0
scanned-delete-manifests: 0
@@ -24,38 +24,19 @@ row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
equality-delete-files: 0
positional-delete-files: 0
Iceberg Plan Metrics for Node 03:
-row_regex:.*total-planning-duration: .+
- result-data-files: 1
result-delete-files: 1
- total-data-manifests: 1
total-delete-manifests: 1
- scanned-data-manifests: 1
- skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
- skipped-data-files: 0
- skipped-delete-files: 0
scanned-delete-manifests: 1
- skipped-delete-manifests: 0
indexed-delete-files: 1
- equality-delete-files: 0
positional-delete-files: 1
Iceberg Plan Metrics for Node 08:
-row_regex:.*total-planning-duration: .+
result-data-files: 4
result-delete-files: 2
total-data-manifests: 4
total-delete-manifests: 2
scanned-data-manifests: 4
- skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
- skipped-data-files: 0
- skipped-delete-files: 0
scanned-delete-manifests: 2
- skipped-delete-manifests: 0
indexed-delete-files: 2
- equality-delete-files: 0
positional-delete-files: 2
====
---- QUERY
@@ -68,15 +49,15 @@ select * from
Iceberg Plan Metrics for Node 00:
Planning done without Iceberg: no Iceberg scan metrics available.
Iceberg Plan Metrics for Node 03:
-row_regex:.*total-planning-duration: .+
+row_regex: total-planning-duration:
result-data-files: 1
result-delete-files: 1
total-data-manifests: 1
total-delete-manifests: 1
scanned-data-manifests: 1
skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-file-size-in-bytes: .+ \(\d+\)
+row_regex: total-delete-file-size-in-bytes: .+ \(\d+\)
skipped-data-files: 0
skipped-delete-files: 0
scanned-delete-manifests: 1
@@ -85,20 +66,12 @@ row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
equality-delete-files: 0
positional-delete-files: 1
Iceberg Plan Metrics for Node 08:
-row_regex:.*total-planning-duration: .+
result-data-files: 4
result-delete-files: 2
total-data-manifests: 4
total-delete-manifests: 2
scanned-data-manifests: 4
- skipped-data-manifests: 0
-row_regex:.*total-file-size-in-bytes: .+ \(\d+\)
-row_regex:.*total-delete-file-size-in-bytes: .+ \(\d+\)
- skipped-data-files: 0
- skipped-delete-files: 0
scanned-delete-manifests: 2
- skipped-delete-manifests: 0
indexed-delete-files: 2
- equality-delete-files: 0
positional-delete-files: 2
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-directed-mode.test
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-directed-mode.test
index c582bebf9..8c918a75b 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-directed-mode.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/iceberg-v2-directed-mode.test
@@ -11,7 +11,7 @@ select count(1) from iceberg_v2_partitioned_position_deletes;
bigint
---- RUNTIME_PROFILE
aggregation(SUM, BuildRows): 30
-!row_regex: .*F03:JOIN BUILD.*
+!row_regex: F03:JOIN BUILD
====
---- QUERY
# Same as above but here there is a separate fragment for the join build.
@@ -24,5 +24,5 @@ select count(1) from iceberg_v2_partitioned_position_deletes;
bigint
---- RUNTIME_PROFILE
aggregation(SUM, BuildRows): 30
-row_regex: .*F03:JOIN BUILD.*
+row_regex: F03:JOIN BUILD
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/in_list_filters.test
b/testdata/workloads/functional-query/queries/QueryTest/in_list_filters.test
index aafab1756..ca45dab7d 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/in_list_filters.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/in_list_filters.test
@@ -8,8 +8,8 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RESULTS
620
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*Files rejected: 7 \(7\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: Files rejected: 7 \(7\)
====
---- QUERY
# Test two hop IN-list filters on partition columns.
@@ -21,9 +21,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 0 items.*
-row_regex: .*Filter 1 arrival with 0 items.*
-row_regex: .*Files rejected: 8 \(8\).*
+row_regex: Filter 0 arrival with 0 items
+row_regex: Filter 1 arrival with 0 items
+row_regex: Files rejected: 8 \(8\)
====
---- QUERY
# Test IN-list filter on string column.
@@ -33,9 +33,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on empty strings.
@@ -45,9 +45,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on tinyint column.
@@ -57,9 +57,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on smallint column.
@@ -69,9 +69,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on int column.
@@ -81,9 +81,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on bigint column.
@@ -93,9 +93,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on bigint column.
@@ -105,9 +105,9 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 2 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 2 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on DATE partition column.
@@ -118,7 +118,7 @@ select STRAIGHT_JOIN count(*) from date_tbl a
---- RESULTS
11
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 17 items.*
+row_regex: Filter 0 arrival with 17 items
aggregation(SUM, Files rejected): 2
====
---- QUERY
@@ -129,8 +129,8 @@ select STRAIGHT_JOIN count(*) from date_tbl a
---- RESULTS
11
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 4 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
+row_regex: Filter 0 arrival with 4 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
====
---- QUERY
# Test IN-list filter with NULL.
@@ -142,9 +142,9 @@ select STRAIGHT_JOIN count(*) from nullrows a
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 1 items.*
-row_regex: .*NumPushedDownRuntimeFilters: 1 \(1\).*
-row_regex: .*RowsRead: 0 \(0\).*
+row_regex: Filter 0 arrival with 1 items
+row_regex: NumPushedDownRuntimeFilters: 1 \(1\)
+row_regex: RowsRead: 0 \(0\)
====
---- QUERY
# Test IN-list filter on complex target expr, i.e. not a simple slot ref.
@@ -156,7 +156,7 @@ select STRAIGHT_JOIN count(*) from
functional_orc_def.alltypes a
---- RESULTS
7
---- RUNTIME_PROFILE
-row_regex: .*RowsRead: 2.43K \(2433\).*
+row_regex: RowsRead: 2.43K \(2433\)
====
---- QUERY
# Test IN-list filter on wide string that exceeds the total string size.
@@ -167,8 +167,8 @@ select STRAIGHT_JOIN count(*) from alltypes a
join [BROADCAST] widerow b
on a.string_col = b.string_col
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 arrival with 0 items.*
-row_regex: .*RowsRead: 2.43K \(2433\).*
+row_regex: Filter 0 arrival with 0 items
+row_regex: RowsRead: 2.43K \(2433\)
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -185,8 +185,8 @@ where t.id = a.id and a.tinyint_col = b.tinyint_col and
b.id = 0;
---- RESULTS
4
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -195,8 +195,8 @@ where t.id = a.id and a.smallint_col = b.smallint_col and
b.id = 0;
---- RESULTS
4
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -205,8 +205,8 @@ where t.id = a.id and a.int_col = b.int_col and b.id = 0;
---- RESULTS
4
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -215,8 +215,8 @@ where t.id = a.id and a.bigint_col = b.bigint_col and b.id
= 0;
---- RESULTS
4
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -225,8 +225,8 @@ where t.id = a.id and a.string_col = b.string_col and b.id
= 0;
---- RESULTS
4
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter on DATE type
@@ -243,8 +243,8 @@ where b.id_col < 5;
---- RESULTS
7
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+5
-row_regex: .*Filter 0 arrival with 5 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+5
+row_regex: Filter 0 arrival with 5 items
====
---- QUERY
# IMPALA-11707: Regression test on global IN-list filter
@@ -261,6 +261,6 @@ where s_nationkey = n_nationkey
---- RESULTS
1987
---- RUNTIME_PROFILE
-row_regex: .*0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
-row_regex: .*Filter 0 arrival with 1 items.*
+row_regex: 0\s+4\s+1\s+REMOTE\s+false.*IN_LIST\s+1
+row_regex: Filter 0 arrival with 1 items
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/joins_mt_dop.test
b/testdata/workloads/functional-query/queries/QueryTest/joins_mt_dop.test
index a3a2bfcc3..fed9a9634 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/joins_mt_dop.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/joins_mt_dop.test
@@ -10,7 +10,7 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RESULTS
620
---- RUNTIME_PROFILE
-row_regex: .*Files rejected: 22 \(22\).*
+row_regex: Files rejected: 22 \(22\)
====
---- QUERY
# Test that the single-node plan is executable and produces the correct result
for a
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
index 3fc054e9d..d8f7971c4 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert.test
@@ -326,7 +326,7 @@ insert into allkeytypes values (1,1,1,1,'1','2009-01-01
00:01:00',cast('1' as va
---- RUNTIME_PROFILE
NumModifiedRows: 1
NumRowErrors: 0
-row_regex: .*EXEC_SINGLE_NODE_ROWS_THRESHOLD=0.*
+row_regex: EXEC_SINGLE_NODE_ROWS_THRESHOLD=0
====
---- QUERY
create table timestampkey (t timestamp primary key)
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert_mem_limit.test
b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert_mem_limit.test
index b48c5ad46..ab8fb13e8 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/kudu_insert_mem_limit.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/kudu_insert_mem_limit.test
@@ -6,5 +6,5 @@ set mem_limit=400m;
create table kudu_test primary key(a, b) partition by hash(a, b) partitions 8
stored as kudu as
select l_orderkey a, concat(l_comment, l_comment, l_comment) b from
tpch.lineitem
---- RUNTIME_PROFILE
-row_regex: .*SortType: Partial
+row_regex: SortType: Partial
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/kudu_runtime_filter_with_timestamp_conversion.test
b/testdata/workloads/functional-query/queries/QueryTest/kudu_runtime_filter_with_timestamp_conversion.test
index 3be96d5fd..e0eac06f8 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/kudu_runtime_filter_with_timestamp_conversion.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/kudu_runtime_filter_with_timestamp_conversion.test
@@ -27,7 +27,7 @@ INT,TIMESTAMP,INT,TIMESTAMP
10,2011-11-06 01:40:00,7,2011-11-06 01:40:00
10,2011-11-06 01:40:00,10,2011-11-06 01:40:00
---- RUNTIME_PROFILE
-row_regex: .*RF00.\[min_max\] <- from_utc_timestamp\(t2.ts,
'America/Los_Angeles'\).*
+row_regex: RF00.\[min_max\] <- from_utc_timestamp\(t2.ts,
'America/Los_Angeles'\)
====
---- QUERY
# Test that kudu min_max runtime filter with timestamps can be work correctly
in the
@@ -41,7 +41,7 @@ INT,BIGINT,TIMESTAMP,STRING
5,1320566400,2011-11-06 01:00:00,'2011-11-06 01:00:00'
8,1320570000,2011-11-06 01:00:00,'2011-11-06 01:00:00'
---- RUNTIME_PROFILE
-row_regex: .*RF00.\[min_max\] <- t2.ts.*
+row_regex: RF00.\[min_max\] <- t2.ts
====
---- QUERY
# Test that kudu bloom runtime filter with timestamps can be work correctly in
the
@@ -69,5 +69,5 @@ INT,TIMESTAMP,TIMESTAMP
9,2011-11-06 17:20:00,2011-11-06 09:20:00
10,2011-11-06 17:40:00,2011-11-06 09:40:00
---- RUNTIME_PROFILE
-row_regex: .*RF00.\[bloom\] <-
utc_to_unix_micros\(to_utc_timestamp\(from_utc_timestamp\(t2.ts,
'Asia/Shanghai'\), 'Asia/Shanghai'\)\).*
+row_regex: RF00.\[bloom\] <-
utc_to_unix_micros\(to_utc_timestamp\(from_utc_timestamp\(t2.ts,
'Asia/Shanghai'\), 'Asia/Shanghai'\)\)
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/max-mt-dop.test
b/testdata/workloads/functional-query/queries/QueryTest/max-mt-dop.test
index bf082ae1a..eeed97a91 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/max-mt-dop.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/max-mt-dop.test
@@ -6,8 +6,8 @@ set request_pool=nosetting;
set mt_dop=9;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by configuration\): .*MT_DOP=8.*
-row_regex: .*MT_DOP limited by admission control: Requested MT_DOP=9 reduced
to MT_DOP=8.*
+row_regex: Query Options \(set by configuration\): .*MT_DOP=8
+row_regex: MT_DOP limited by admission control: Requested MT_DOP=9 reduced to
MT_DOP=8
====
---- QUERY
# The 'limited' resource pool has max-mt-dop set to 4, so the query is
downgraded.
@@ -15,8 +15,8 @@ set request_pool=limited;
set mt_dop=9;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by configuration\): .*MT_DOP=4.*
-row_regex: .*MT_DOP limited by admission control: Requested MT_DOP=9 reduced
to MT_DOP=4.*
+row_regex: Query Options \(set by configuration\): .*MT_DOP=4
+row_regex: MT_DOP limited by admission control: Requested MT_DOP=9 reduced to
MT_DOP=4
====
---- QUERY
# The 'negative' resource pool has max-mt-dop set to -1, which means the limit
is
@@ -25,7 +25,7 @@ set request_pool=negative;
set mt_dop=9;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by configuration\): .*MT_DOP=9.*
+row_regex: Query Options \(set by configuration\): .*MT_DOP=9
====
---- QUERY
# The 'largeint' resource pool has max-mt-dop set to a value that doesn't fit
in 4 bytes.
@@ -34,7 +34,7 @@ set request_pool=largeint;
set mt_dop=9;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by configuration\): .*MT_DOP=9.*
+row_regex: Query Options \(set by configuration\): .*MT_DOP=9
====
---- QUERY
# The 'zero' resource pool has max-mt-dop set to 0, so the query is downgraded
to 0.
@@ -42,6 +42,6 @@ set request_pool=zero;
set mt_dop=9;
select 1;
---- RUNTIME_PROFILE
-row_regex: .*Query Options \(set by configuration\): .*MT_DOP=0.*
-row_regex: .*MT_DOP limited by admission control: Requested MT_DOP=9 reduced
to MT_DOP=0.*
+row_regex: Query Options \(set by configuration\): .*MT_DOP=0
+row_regex: MT_DOP limited by admission control: Requested MT_DOP=9 reduced to
MT_DOP=0
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/min_max_filters.test
b/testdata/workloads/functional-query/queries/QueryTest/min_max_filters.test
index f14e02ad7..e5367429e 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/min_max_filters.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/min_max_filters.test
@@ -325,7 +325,7 @@ where a.date_col = b.date_col and b.date_col between DATE
'2010-01-01' and
10
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 4
-row_regex: .*1 of 1 Runtime Filter Published.*
+row_regex: 1 of 1 Runtime Filter Published
====
---- QUERY
@@ -423,7 +423,7 @@ on a.date_col = b.date_col and b.date_col = c.date_col;
50
---- RUNTIME_PROFILE
aggregation(SUM, ProbeRows): 48
-row_regex: .*1 of 1 Runtime Filter Published.*
+row_regex: 1 of 1 Runtime Filter Published
====
---- QUERY
SET RUNTIME_FILTER_WAIT_TIME_MS=$RUNTIME_FILTER_WAIT_TIME_MS;
@@ -433,5 +433,5 @@ where i1.action = i2.action and
i1.id = i2.id and
i2.event_time = '2020-01-01 10:00:00';
---- RUNTIME_PROFILE
-row_regex:.* RF00.\[min_max\] -> i1\.action.*
+row_regex: RF00.\[min_max\] -> i1\.action
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet-scheduling.test
b/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet-scheduling.test
index 169385d3d..c279351da 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet-scheduling.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/mt-dop-parquet-scheduling.test
@@ -6,7 +6,7 @@ select min(string_col) from alltypes
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 4.*
+row_regex:AdmissionSlots: 4
====
---- QUERY
# 2 partitions across 3 backends means that we'll get 1 finstance per backend
@@ -15,7 +15,7 @@ select min(string_col) from alltypes where month = 1
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 1.*
+row_regex:AdmissionSlots: 1
====
---- QUERY
# 7 partitions across 3 backends results in 3 finstances on one backend and 2
@@ -25,8 +25,8 @@ select min(string_col) from alltypes where month <= 7 and
year = 2009
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 2.*
-row_regex:.*AdmissionSlots: 3.*
+row_regex:AdmissionSlots: 2
+row_regex:AdmissionSlots: 3
====
---- QUERY
# This query should have three scans in the same fragment. The scan of
'alltypes'
@@ -40,13 +40,13 @@ select min(string_col) from (
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 4 .*
-row_regex:.*F04:ROOT * 1 * 1 .*
-row_regex:.*04:AGGREGATE * 3 * 12 .*
-row_regex:.*00:UNION * 3 * 12 *
-row_regex:.*02:SCAN (HDFS|OZONE) * 3 * 12 .*alltypessmall.*
-row_regex:.*03:SCAN (HDFS|OZONE) * 3 * 12 .*alltypestiny.*
-row_regex:.*01:SCAN (HDFS|OZONE) * 3 * 12 .*alltypes.*
+row_regex: AdmissionSlots: 4
+row_regex: F04:ROOT * 1 * 1
+row_regex: 04:AGGREGATE * 3 * 12
+row_regex: 00:UNION * 3 * 12 *
+row_regex: 02:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypessmall
+row_regex: 03:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypestiny
+row_regex: 01:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypes
====
---- QUERY
# Same idea, but with smallest scan first to check that the scheduler is
taking the
@@ -60,13 +60,13 @@ select min(string_col) from (
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 4 .*
-row_regex:.*F04:ROOT * 1 * 1 .*
-row_regex:.*04:AGGREGATE * 3 * 12 .*
-row_regex:.*00:UNION * 3 * 12 *
-row_regex:.*02:SCAN (HDFS|OZONE) * 3 * 12 .*alltypessmall.*
-row_regex:.*03:SCAN (HDFS|OZONE) * 3 * 12 .*alltypes.*
-row_regex:.*01:SCAN (HDFS|OZONE) * 3 * 12 .*alltypestiny.*
+row_regex: AdmissionSlots: 4
+row_regex: F04:ROOT * 1 * 1
+row_regex: 04:AGGREGATE * 3 * 12
+row_regex: 00:UNION * 3 * 12 *
+row_regex: 02:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypessmall
+row_regex: 03:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypes
+row_regex: 01:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypestiny
====
---- QUERY
# This query should have one scan and one exchange in the interior fragment.
@@ -79,14 +79,14 @@ select min(string_col) from (
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 4.*
-row_regex:.*F04:ROOT * 1 * 1 .*
-row_regex:.*04:AGGREGATE * 3 * 12 .*
-row_regex:.*06:AGGREGATE * 3 * 12 .*
-row_regex:.*03:AGGREGATE * 3 * 12 .*
-row_regex:.*00:UNION * 3 * 12 *
-row_regex:.*02:SCAN (HDFS|OZONE) * 3 * 12 .*alltypes.*
-row_regex:.*01:SCAN (HDFS|OZONE) * 3 * 12 .*alltypestiny.*
+row_regex:AdmissionSlots: 4
+row_regex: F04:ROOT * 1 * 1
+row_regex: 04:AGGREGATE * 3 * 12
+row_regex: 06:AGGREGATE * 3 * 12
+row_regex: 03:AGGREGATE * 3 * 12
+row_regex: 00:UNION * 3 * 12 *
+row_regex: 02:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypes
+row_regex: 01:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypestiny
====
---- QUERY
# This query should have one scan and one exchange in the interior fragment.
@@ -101,14 +101,14 @@ select min(string_col) from (
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*AdmissionSlots: 4.*
-row_regex:.*F04:ROOT * 1 * 1 .*
-row_regex:.*04:AGGREGATE * 3 * 12 .*
-row_regex:.*06:AGGREGATE * 3 * 12 .*
-row_regex:.*03:AGGREGATE * 3 * 4 .*
-row_regex:.*00:UNION * 3 * 12 *
-row_regex:.*02:SCAN (HDFS|OZONE) * 3 * 4 .*alltypestiny.*
-row_regex:.*01:SCAN (HDFS|OZONE) * 3 * 12 .*alltypes.*
+row_regex:AdmissionSlots: 4
+row_regex: F04:ROOT * 1 * 1
+row_regex: 04:AGGREGATE * 3 * 12
+row_regex: 06:AGGREGATE * 3 * 12
+row_regex: 03:AGGREGATE * 3 * 4
+row_regex: 00:UNION * 3 * 12 *
+row_regex: 02:SCAN (?:HDFS|OZONE) * 3 * 4 .*alltypestiny
+row_regex: 01:SCAN (?:HDFS|OZONE) * 3 * 12 .*alltypes
====
---- QUERY
# This query should have one scan and two exchanges in the interior fragment.
@@ -123,13 +123,13 @@ select min(string_col) from (
---- RESULTS
'0'
---- RUNTIME_PROFILE
-row_regex:.*F06:ROOT * 1 * 1 .*
-row_regex:.*AdmissionSlots: 2.*
-row_regex:.*00:UNION * 3 * 6 .*
-row_regex:.*08:AGGREGATE * 3 * 6 .*
-row_regex:.*03:AGGREGATE * 3 * 6 .*
-row_regex:.*04:SCAN (HDFS|OZONE) * 3 * 6 .*
-row_regex:.*01:SCAN (HDFS|OZONE) * 3 * 6 .*
+row_regex: F06:ROOT * 1 * 1
+row_regex:AdmissionSlots: 2
+row_regex: 00:UNION * 3 * 6
+row_regex: 08:AGGREGATE * 3 * 6
+row_regex: 03:AGGREGATE * 3 * 6
+row_regex: 04:SCAN (?:HDFS|OZONE) * 3 * 6
+row_regex: 01:SCAN (?:HDFS|OZONE) * 3 * 6
====
---- QUERY: TPCDS-Q11
with year_total as (
@@ -302,8 +302,8 @@ limit 100;
---- TYPES
STRING, STRING, STRING, STRING
---- RUNTIME_PROFILE
-row_regex:.*21:UNION * 2 * 2 .*
-row_regex:.*00:UNION * 3 * 12 .*
-row_regex:.*14:UNION * 2 * 2 .*
-row_regex:.*07:UNION * 3 * 12 .*
+row_regex: 21:UNION * 2 * 2
+row_regex: 00:UNION * 3 * 12
+row_regex: 14:UNION * 2 * 2
+row_regex: 07:UNION * 3 * 12
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/no-block-locations-hdfs-only.test
b/testdata/workloads/functional-query/queries/QueryTest/no-block-locations-hdfs-only.test
index b3fc8b3d1..c3e171bd1 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/no-block-locations-hdfs-only.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/no-block-locations-hdfs-only.test
@@ -11,5 +11,5 @@ select count(*) from
functional_parquet.iceberg_lineitem_sixblocks where l_order
BIGINT
---- RUNTIME_PROFILE
# The following should be in the ExecSummary
-row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +.*
+row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/no-block-locations.test
b/testdata/workloads/functional-query/queries/QueryTest/no-block-locations.test
index 9df5d0c94..b99ca6163 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/no-block-locations.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/no-block-locations.test
@@ -34,5 +34,5 @@ select count(*) from functional_parquet.lineitem_sixblocks
where l_orderkey % 2
BIGINT
---- RUNTIME_PROFILE
# The following should be in the ExecSummary
-row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +.*
+row_regex: 00:SCAN [A-Z0-9]+ +1 +1 +
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters.test
b/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters.test
index bfbf19ac4..ef7a45e35 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters.test
@@ -309,7 +309,7 @@ where a.l_orderkey = b.o_orderkey;
---- RESULTS
6001215
---- RUNTIME_PROFILE
-row_regex: .*1.+0 \(\d+\).+false.+MIN_MAX\s+AlwaysTrue\s+AlwaysTrue.*
+row_regex: 1.+0 \(\d+\).+false.+MIN_MAX\s+AlwaysTrue\s+AlwaysTrue
====
---- QUERY
# Run an equi hash join query in which the population of the hash table
produces
@@ -329,7 +329,7 @@ where a.l_orderkey = b.o_orderkey and b.o_custkey = 5;
---- RESULTS
19
---- RUNTIME_PROFILE
-row_regex: .*1.+0 \(\d+\).+true.+MIN_MAX\s+224167\s+2630562.*
+row_regex: 1.+0 \(\d+\).+true.+MIN_MAX\s+224167\s+2630562
====
---- QUERY
# Run an equi hash join query in which the population of the hash table
produces
@@ -347,7 +347,7 @@ where a.l_orderkey = b.o_orderkey and b.o_custkey = -5;
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*1.+0 \(\d+\).+true.+MIN_MAX\s+AlwaysFalse\s+AlwaysFalse.*
+row_regex: 1.+0 \(\d+\).+true.+MIN_MAX\s+AlwaysFalse\s+AlwaysFalse
====
---- QUERY
# Positive tests to check out the explain output involving a non-correlated
one-row
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters_on_sorted_columns.test
b/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters_on_sorted_columns.test
index e721ef49f..8793d7a50 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters_on_sorted_columns.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/overlap_min_max_filters_on_sorted_columns.test
@@ -32,9 +32,9 @@ and b.o_custkey = 5
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*Number of filters: 2.*
-row_regex: .*0.+0 \(\d+\).+true.+1.00 MB.*
-row_regex: .*1.+0 \(\d+\).+true.+MIN_MAX\s+224167\s+2630562.*
+row_regex: Number of filters: 2
+row_regex: 0.+0 \(\d+\).+true.+1.00 MB
+row_regex: 1.+0 \(\d+\).+true.+MIN_MAX\s+224167\s+2630562
====
---- QUERY
# Turn off the min/max filter on leading sort by column. Only the bloom filter
will be
@@ -53,7 +53,7 @@ and b.o_custkey = 5
---- RESULTS
0
---- RUNTIME_PROFILE
-row_regex: .*0.+0 \(\d+\).+true.+1.00 MB.*
+row_regex: 0.+0 \(\d+\).+true.+1.00 MB
====
---- QUERY
##################################################
@@ -366,5 +366,5 @@ lineitem_orderkey_partkey_only where l_orderkey < 3000);
---- RUNTIME_PROFILE
aggregation(SUM, NumRuntimeFilteredRowGroups): 2
aggregation(SUM, NumRuntimeFilteredPages)> 200
-row_regex:.*NESTED LOOP JOIN.*
+row_regex: NESTED LOOP JOIN
====
diff --git
a/testdata/workloads/functional-query/queries/QueryTest/parquet-late-materialization.test
b/testdata/workloads/functional-query/queries/QueryTest/parquet-late-materialization.test
index 2e8b39093..f1308c3e0 100644
---
a/testdata/workloads/functional-query/queries/QueryTest/parquet-late-materialization.test
+++
b/testdata/workloads/functional-query/queries/QueryTest/parquet-late-materialization.test
@@ -21,7 +21,7 @@ select * from tpch_parquet.lineitem l
join tpch_parquet.orders o on l.l_orderkey = o.o_orderkey
where o_orderdate='1992-06-22' and o_totalprice = 153827.26;
---- RUNTIME_PROFILE
-row_regex: .*1 of 1 Runtime Filter Published.*
+row_regex: 1 of 1 Runtime Filter Published
aggregation(SUM, NumPagesSkippedByLateMaterialization)> 0
====
---- QUERY
@@ -34,7 +34,7 @@ select * from tpch_parquet.lineitem l
join tpch_parquet.orders o on l.l_orderkey = o.o_orderkey
where o_orderdate='1996-12-01' and o_totalprice >= 250000;
---- RUNTIME_PROFILE
-row_regex:.* RF00.\[min_max\] -. .\.l_orderkey.*
+row_regex: RF00.\[min_max\] -. .\.l_orderkey
aggregation(SUM, NumPagesSkippedByLateMaterialization)> 0
====
---- QUERY
diff --git a/testdata/workloads/functional-query/queries/QueryTest/processing-cost-admission-slots.test b/testdata/workloads/functional-query/queries/QueryTest/processing-cost-admission-slots.test
index 422d43af5..cf63848e9 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/processing-cost-admission-slots.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/processing-cost-admission-slots.test
@@ -33,12 +33,12 @@ order by c_customer_id
limit 100;
---- RUNTIME_PROFILE
aggregation(SUM, AdmissionSlots): 16
-row_regex: .* AvgAdmissionSlotsPerExecutor: 6 .*
-row_regex: .* Executor group 2 \(large\):.*
-row_regex: .* CpuAsk: 18 .*
-row_regex: .* CpuAskBounded: 16 .*
-row_regex: .* EffectiveParallelism: 16 .*
-row_regex: .* MaxParallelism: 16 .*
+row_regex: AvgAdmissionSlotsPerExecutor: 6
+row_regex: Executor group 2 \(large\):
+row_regex: CpuAsk: 18
+row_regex: CpuAskBounded: 16
+row_regex: EffectiveParallelism: 16
+row_regex: MaxParallelism: 16
====
---- QUERY: TPCDS-Q1-CPC-LARGEST-FRAGMENT
-- Expect a total of 3 admission slots given to this query if using LARGEST_FRAGMENT strategy.
@@ -69,12 +69,12 @@ order by c_customer_id
limit 100;
---- RUNTIME_PROFILE
aggregation(SUM, AdmissionSlots): 3
-!row_regex: .* AvgAdmissionSlotsPerExecutor: .*
-row_regex: .* Executor group 2 \(large\):.*
-row_regex: .* CpuAsk: 18 .*
-row_regex: .* CpuAskBounded: 16 .*
-row_regex: .* EffectiveParallelism: 16 .*
-row_regex: .* MaxParallelism: 16 .*
+!row_regex: AvgAdmissionSlotsPerExecutor:
+row_regex: Executor group 2 \(large\):
+row_regex: CpuAsk: 18
+row_regex: CpuAskBounded: 16
+row_regex: EffectiveParallelism: 16
+row_regex: MaxParallelism: 16
====
---- QUERY: TPCDS-Q1-NO-CPC
-- Expect a total of 3 admission slots given to this query if COMPUTE_PROCESSING_COST is disabled.
@@ -107,10 +107,10 @@ order by c_customer_id
limit 100;
---- RUNTIME_PROFILE
aggregation(SUM, AdmissionSlots): 3
-!row_regex: .* AvgAdmissionSlotsPerExecutor: .*
-row_regex: .* Executor group 2 \(large\):.*
-!row_regex: .* CpuAsk: .*
-!row_regex: .* CpuAskBounded: .*
-!row_regex: .* EffectiveParallelism: .*
-!row_regex: .* MaxParallelism: .*
+!row_regex: AvgAdmissionSlotsPerExecutor:
+row_regex: Executor group 2 \(large\):
+!row_regex: CpuAsk:
+!row_regex: CpuAskBounded:
+!row_regex: EffectiveParallelism:
+!row_regex: MaxParallelism:
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/query-impala-13138.test b/testdata/workloads/functional-query/queries/QueryTest/query-impala-13138.test
index d3f655ba7..b80f4dff9 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/query-impala-13138.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/query-impala-13138.test
@@ -23,7 +23,7 @@ FROM letter_marketing_response_events re
AND re.send_account_sk not in (43)
GROUP BY ecc.letter_mission_name, re.`date`, c.war_group;
---- RESULTS: VERIFY_IS_SUBSET
-row_regex:.*,'2024-06-07',.*
+row_regex:,'2024-06-07',
---- TYPES
STRING,STRING,STRING
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/query-resource-limits.test b/testdata/workloads/functional-query/queries/QueryTest/query-resource-limits.test
index fa7554a94..2fab5d80a 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/query-resource-limits.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/query-resource-limits.test
@@ -129,7 +129,7 @@ order by
value desc
) t;
---- CATCH
-row_regex:.*terminated due to join rows produced exceeds the limit of 10.00k*
+row_regex:.*terminated due to join rows produced exceeds the limit of 10.00K
====
---- QUERY
# The query should succeed when it doesn't exceed the join rows produced limit.
diff --git a/testdata/workloads/functional-query/queries/QueryTest/runtime_filters.test b/testdata/workloads/functional-query/queries/QueryTest/runtime_filters.test
index da06821d1..ad5f3009a 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/runtime_filters.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/runtime_filters.test
@@ -13,7 +13,7 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RESULTS
620
---- RUNTIME_PROFILE
-row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K.*
+row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K
====
---- QUERY
# Now turn on local filtering: we expect to see a reduction in scan volume.
@@ -27,7 +27,7 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 22
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+608.*
+row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+608
====
@@ -46,7 +46,7 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RESULTS
620
---- RUNTIME_PROFILE
-row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K.*
+row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K
====
---- QUERY
# Shuffle join, global mode. Expect filters to be propagated.
@@ -59,7 +59,7 @@ on p.month = b.int_col and b.month = 1 and b.string_col = "1"
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 22
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+608.*
+row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+608
====
@@ -85,7 +85,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 0
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K
====
---- QUERY
# Global mode. Scan of 'b' will receive highly effective filter, and will propagate that
@@ -102,7 +102,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
aggregation(SUM, Files rejected): 48
---- RUNTIME_PROFILE: table_format=kudu
aggregation(SUM, RowsRead): 0
-row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K
====
@@ -125,7 +125,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
aggregation(SUM, RowsRead): 8
aggregation(SUM, Files rejected): 24
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K
====
@@ -159,7 +159,7 @@ aggregation(SUM, FiltersReceived): 0
aggregation(SUM, Files rejected): 24
---- RUNTIME_PROFILE: table_format=kudu
aggregation(SUM, FiltersReceived): 0
-row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K
====
@@ -195,10 +195,10 @@ select STRAIGHT_JOIN count(*) from alltypes a
0
---- RUNTIME_PROFILE
aggregation(SUM, FiltersReceived): $NUM_FILTER_UPDATES
-row_regex: .*REMOTE.*.s .*.s .*true
+row_regex: REMOTE.*.s .*.s .*true
---- RUNTIME_PROFILE: table_format=kudu
aggregation(SUM, FiltersReceived): 3
-row_regex: .*REMOTE.*.s .*.s .*true
+row_regex: REMOTE.*.s .*.s .*true
====
@@ -219,7 +219,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 23
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+7.30K[ ]+7.30K
====
@@ -236,7 +236,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
7300
---- RUNTIME_PROFILE
-row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K.*
+row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K
====
@@ -256,7 +256,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 24
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K.*
+row_regex: 00:SCAN KUDU.*s[ ]+0[ ]+7.30K
====
---- QUERY
@@ -272,7 +272,7 @@ select STRAIGHT_JOIN count(*) from alltypes a
---- RESULTS
7308
---- RUNTIME_PROFILE
-row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K.*
+row_regex: 00:SCAN.*s[ ]+7.30K[ ]+[0-9\.]+K
====
@@ -299,7 +299,7 @@ select STRAIGHT_JOIN count(*) from alltypesagg a
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 10
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+11.00K[ ]+11.00K.*
+row_regex: 00:SCAN KUDU.*s[ ]+11.00K[ ]+11.00K
====
@@ -321,7 +321,7 @@ with t1 as (select month x, bigint_col y from alltypes limit 7301),
---- RUNTIME_PROFILE
aggregation(SUM, Files rejected): 22
---- RUNTIME_PROFILE: table_format=kudu
-row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+1.82K.*
+row_regex: 00:SCAN KUDU.*s[ ]+620[ ]+1.82K
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
index c6943714c..7204a4b90 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/runtime_row_filter_reservations.test
@@ -48,7 +48,7 @@ select STRAIGHT_JOIN * from alltypes a join [SHUFFLE] alltypes b
on a.month = b.id and b.int_col = -3
---- RESULTS
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 \(128.00 MB\).*
+row_regex: Filter 0 \(128.00 MB\)
aggregation(SUM, Files processed): 24
aggregation(SUM, Files rejected): 24
====
@@ -84,7 +84,7 @@ select STRAIGHT_JOIN * from alltypes a join [SHUFFLE] alltypes b
on a.month = b.id and b.int_col = -3
---- RESULTS
---- RUNTIME_PROFILE
-row_regex: .*Filter 0 \(128.00 MB\).*
+row_regex: Filter 0 \(128.00 MB\)
aggregation(SUM, Files processed): 24
aggregation(SUM, Files rejected): 24
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/scanner-reservation.test b/testdata/workloads/functional-query/queries/QueryTest/scanner-reservation.test
index 894fe448a..1dfe98796 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/scanner-reservation.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/scanner-reservation.test
@@ -8,8 +8,8 @@ BIGINT
---- RESULTS
150000
---- RUNTIME_PROFILE
-row_regex:.*InitialRangeIdealReservation.*Avg: 24.00 MB.*Number of samples: 1.*
-row_regex:.*InitialRangeActualReservation.*Avg: 24.00 MB.*Number of samples: 1.*
+row_regex: InitialRangeIdealReservation.*Avg: 24.00 MB.*Number of samples: 1
+row_regex: InitialRangeActualReservation.*Avg: 24.00 MB.*Number of samples: 1
====
---- QUERY
# Scan moderately large file - scanner should try to increase reservation and fail.
@@ -21,8 +21,8 @@ BIGINT
---- RESULTS
150000
---- RUNTIME_PROFILE
-row_regex:.*InitialRangeIdealReservation.*Avg: 24.00 MB.*Number of samples: 1.*
-row_regex:.*InitialRangeActualReservation.*Avg: 8.00 MB.*Number of samples: 1.*
+row_regex: InitialRangeIdealReservation.*Avg: 24.00 MB.*Number of samples: 1
+row_regex: InitialRangeActualReservation.*Avg: 8.00 MB.*Number of samples: 1
====
---- QUERY
# Scan large Parquet column - scanner should try to increase reservation and succeed.
@@ -33,10 +33,10 @@ STRING
---- RESULTS
' Tiresias '
---- RUNTIME_PROFILE
-row_regex:.*InitialRangeIdealReservation.*Avg: 128.00 KB.*
-row_regex:.*InitialRangeActualReservation.*Avg: 4.00 MB.*
-row_regex:.*ColumnarScannerIdealReservation.*Avg: 24.00 MB.*
-row_regex:.*ColumnarScannerActualReservation.*Avg: 24.00 MB.*
+row_regex: InitialRangeIdealReservation.*Avg: 128.00 KB
+row_regex: InitialRangeActualReservation.*Avg: 4.00 MB
+row_regex: ColumnarScannerIdealReservation.*Avg: 24.00 MB
+row_regex: ColumnarScannerActualReservation.*Avg: 24.00 MB
====
---- QUERY
# Scan moderately large file - scanner should try to increase reservation and fail.
@@ -48,10 +48,10 @@ STRING
---- RESULTS
' Tiresias '
---- RUNTIME_PROFILE
-row_regex:.*InitialRangeIdealReservation.*Avg: 128.00 KB.*
-row_regex:.*InitialRangeActualReservation.*Avg: 4.00 MB.*
-row_regex:.*ColumnarScannerIdealReservation.*Avg: 24.00 MB.*
-row_regex:.*ColumnarScannerActualReservation.*Avg: 4.00 MB.*
+row_regex: InitialRangeIdealReservation.*Avg: 128.00 KB
+row_regex: InitialRangeActualReservation.*Avg: 4.00 MB
+row_regex: ColumnarScannerIdealReservation.*Avg: 24.00 MB
+row_regex: ColumnarScannerActualReservation.*Avg: 4.00 MB
====
---- QUERY
# IMPALA-8742: Use ScanRange::bytes_to_read() instead of len(), it has an effect
@@ -59,5 +59,5 @@ row_regex:.*ColumnarScannerActualReservation.*Avg: 4.00 MB.*
select * from tpch_parquet.lineitem
where l_orderkey < 10;
---- RUNTIME_PROFILE
-row_regex:.*ColumnarScannerIdealReservation.*Avg: [34].\d+ MB.*
+row_regex: ColumnarScannerIdealReservation.*Avg: [34].\d+ MB
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/scratch-limit.test b/testdata/workloads/functional-query/queries/QueryTest/scratch-limit.test
index fed015517..7e312c6b0 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/scratch-limit.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/scratch-limit.test
@@ -4,7 +4,7 @@
set scratch_limit=-1;
select o_orderdate, o_custkey, o_comment from tpch.orders limit 100000;
---- RUNTIME_PROFILE
-row_regex: .*set by configuration and planner.*
+row_regex: set by configuration and planner
row_regex: \| mem-estimate=8.63MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
====
---- QUERY
@@ -12,7 +12,7 @@ row_regex: \| mem-estimate=8.63MB mem-reservation=4.00MB spill-buffer=2.00MB th
set scratch_limit=0;
select o_orderdate, o_custkey, o_comment from tpch.orders limit 100000;
---- RUNTIME_PROFILE
-row_regex: .*set by configuration and planner.*SPOOL_QUERY_RESULTS=0
+row_regex: set by configuration and planner.*SPOOL_QUERY_RESULTS=0
row_regex: \| mem-estimate=0B mem-reservation=0B thread-reservation=0
====
---- QUERY
@@ -21,7 +21,7 @@ row_regex: \| mem-estimate=0B mem-reservation=0B thread-reservation=0
set scratch_limit=2m;
select o_orderdate, o_custkey, o_comment from tpch.orders limit 100000;
---- RUNTIME_PROFILE
-row_regex: .*set by configuration and planner.*SPOOL_QUERY_RESULTS=0
+row_regex: set by configuration and planner.*SPOOL_QUERY_RESULTS=0
row_regex: \| mem-estimate=0B mem-reservation=0B thread-reservation=0
====
---- QUERY
@@ -30,7 +30,7 @@ row_regex: \| mem-estimate=0B mem-reservation=0B thread-reservation=0
set scratch_limit=7m;
select o_orderdate, o_custkey, o_comment from tpch.orders limit 100000;
---- RUNTIME_PROFILE
-row_regex: .*set by configuration and planner.*MAX_RESULT_SPOOLING_MEM=5242880,MAX_SPILLED_RESULT_SPOOLING_MEM=5242880
+row_regex: set by configuration and planner.*MAX_RESULT_SPOOLING_MEM=5242880,MAX_SPILLED_RESULT_SPOOLING_MEM=5242880
row_regex: \| mem-estimate=5.00MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
====
---- QUERY
@@ -39,6 +39,6 @@ row_regex: \| mem-estimate=5.00MB mem-reservation=4.00MB spill-buffer=2.00MB th
set scratch_limit=200m;
select o_orderdate, o_custkey, o_comment from tpch.orders limit 100000;
---- RUNTIME_PROFILE
-row_regex: .*set by configuration and planner.*MAX_SPILLED_RESULT_SPOOLING_MEM=207618048
+row_regex: set by configuration and planner.*MAX_SPILLED_RESULT_SPOOLING_MEM=207618048
row_regex: \| mem-estimate=8.63MB mem-reservation=4.00MB spill-buffer=2.00MB thread-reservation=0
====
\ No newline at end of file
diff --git a/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test b/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
index 14ad2ed3c..b58c9e541 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/single-node-joins-with-limits-exhaustive.test
@@ -10,7 +10,7 @@ select id, int_col, bigint_col from functional.alltypesagg a
where int_col not in (select int_col from functional.alltypestiny t
where a.id = t.id) limit 10995;
---- RUNTIME_PROFILE
-row_regex: .*RowsProduced: 10.99..\W10995\W
+row_regex: RowsProduced: 10.99..\W10995\W
====
---- QUERY
# Test to verify that is limit_ is correctly enforced when
@@ -20,7 +20,7 @@ set buffer_pool_limit=180m;
select * from tpch.lineitem t1 full outer join tpch.lineitem t2 on
t1.l_orderkey = t2.l_orderkey limit 10;
---- RUNTIME_PROFILE
-row_regex: .*RowsProduced: 10 .
+row_regex: RowsProduced: 10 .
====
---- QUERY
# IMPALA-4866: Hash join node does not apply limits correctly
@@ -31,7 +31,7 @@ select straight_join t1.id, t2.id from functional.alltypes t1
right join functional.alltypes t2 on t1.id = t2.int_col + 100000
limit 5;
---- RUNTIME_PROFILE
-row_regex: .*RowsProduced: 5 .
+row_regex: RowsProduced: 5 .
====
---- QUERY
# IMPALA-4866: Hash join node does not apply limits correctly
@@ -41,5 +41,5 @@ set batch_size=10;
select straight_join t1.id, t2.id from functional.alltypes t1
inner join functional.alltypes t2 on t1.id = t2.id limit 5;
---- RUNTIME_PROFILE
-row_regex: .*RowsProduced: 5 .
+row_regex: RowsProduced: 5 .
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test b/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
index 93ed5108f..7c5efb997 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/single-node-large-sorts.test
@@ -29,8 +29,8 @@ STRING,STRING,STRING
'Faaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','',''
'Faaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','',''
---- RUNTIME_PROFILE
-row_regex: .* TotalMergesPerformed: [^0].*
-row_regex: .* SpilledRuns: [^0].*
+row_regex: TotalMergesPerformed: [^0]
+row_regex: SpilledRuns: [^0]
====
---- QUERY
# Regression test for IMPALA-5554: first string column in sort tuple is null
@@ -62,5 +62,5 @@ STRING,STRING,STRING
'Faaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','NULL','NULL'
'Faaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','NULL','NULL'
---- RUNTIME_PROFILE
-row_regex: .* SpilledRuns: [^0].*
+row_regex: SpilledRuns: [^0]
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/spilling-no-debug-action.test b/testdata/workloads/functional-query/queries/QueryTest/spilling-no-debug-action.test
index a5b6d6f17..0db81527c 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/spilling-no-debug-action.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/spilling-no-debug-action.test
@@ -18,7 +18,7 @@ BIGINT
---- RESULTS
173
---- RUNTIME_PROFILE
-row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
+row_regex: NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
====
---- QUERY
# spilled partition with 0 probe rows, NULL AWARE LEFT ANTI JOIN
@@ -34,7 +34,7 @@ BIGINT
---- RESULTS
287
---- RUNTIME_PROFILE
-row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
+row_regex: NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
====
---- QUERY
# spilled partition with 0 probe rows, RIGHT OUTER JOIN
@@ -48,7 +48,7 @@ BIGINT
---- RESULTS
12138
---- RUNTIME_PROFILE
-row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
+row_regex: NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
====
---- QUERY
# spilled partition with 0 probe rows, RIGHT OUTER JOIN
@@ -64,7 +64,7 @@ BIGINT
---- RESULTS
12138
---- RUNTIME_PROFILE
-row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
+row_regex: NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
====
---- QUERY
# spilled partition with 0 probe rows, RIGHT ANTI JOIN
@@ -78,7 +78,7 @@ BIGINT
---- RESULTS
5995258
---- RUNTIME_PROFILE
-row_regex: .*NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
+row_regex: NumHashTableBuildsSkipped: .* \([1-9][0-9]*\)
====
---- QUERY
# Aggregation query that will OOM and fail to spill because of IMPALA-3304 without
diff --git a/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive-no-default-buffer-size.test b/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive-no-default-buffer-size.test
index d3470a308..706c45980 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive-no-default-buffer-size.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive-no-default-buffer-size.test
@@ -27,5 +27,5 @@ BIGINT,BIGINT,BIGINT,INT,STRING
6000000,32255,2256,1,'carefully '
6000000,96127,6128,2,'ooze furiously about the pe'
---- RUNTIME_PROFILE
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive.test b/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive.test
index 87e1874dc..afec61c92 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/spilling-regression-exhaustive.test
@@ -35,8 +35,8 @@ limit 20;
CHAR
---- RUNTIME_PROFILE
# Verify that the sort actually spilled
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
-row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: TotalMergesPerformed: .* \([1-9][0-9]*\)
====
---- QUERY
# Test sort with small input char column materialized before sort.
@@ -74,8 +74,8 @@ limit 20;
CHAR
---- RUNTIME_PROFILE
# Verify that the sort actually spilled
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
-row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: TotalMergesPerformed: .* \([1-9][0-9]*\)
====
---- QUERY
# Test sort with large input char column materialized before sort.
@@ -113,8 +113,8 @@ limit 20;
CHAR
---- RUNTIME_PROFILE
# Verify that the sort actually spilled
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
-row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: TotalMergesPerformed: .* \([1-9][0-9]*\)
====
---- QUERY
# Test sort with varchar column materialized by exprs.
@@ -154,8 +154,8 @@ STRING
VARCHAR
---- RUNTIME_PROFILE
# Verify that the sort actually spilled
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
-row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: TotalMergesPerformed: .* \([1-9][0-9]*\)
====
---- QUERY
# Test sort with input varchar column materialized before sort.
@@ -195,8 +195,8 @@ STRING
VARCHAR
---- RUNTIME_PROFILE
# Verify that the sort actually spilled
-row_regex: .*SpilledRuns: .* \([1-9][0-9]*\)
-row_regex: .*TotalMergesPerformed: .* \([1-9][0-9]*\)
+row_regex: SpilledRuns: .* \([1-9][0-9]*\)
+row_regex: TotalMergesPerformed: .* \([1-9][0-9]*\)
====
---- QUERY
# Regression test for IMPALA-2612. The following query will cause CastToChar
@@ -216,7 +216,7 @@ from lineitem
BIGINT
---- RUNTIME_PROFILE
# Verify that the agg spilled.
-row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
+aggregation(SUM, SpilledPartitions)> 0
====
---- QUERY
# Same as above, except disable streaming preaggs to ensure that AggregationNode is also
@@ -234,7 +234,7 @@ from lineitem
BIGINT
---- RUNTIME_PROFILE
# Verify that the agg spilled.
-row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
+aggregation(SUM, SpilledPartitions)> 0
====
---- QUERY
# Same as above, except use a non-grouping aggregate function to ensure that
@@ -268,5 +268,5 @@ BIGINT,DECIMAL,DECIMAL,DECIMAL,DECIMAL
4571042,0.030000,50.000000,0.090000,104399.500000
1198304,0.010000,50.000000,0.020000,104299.500000
---- RUNTIME_PROFILE
-row_regex: .*SpilledPartitions: .* \([1-9][0-9]*\)
+aggregation(SUM, SpilledPartitions)> 0
====
diff --git a/testdata/workloads/functional-query/queries/QueryTest/union-const-scalar-expr-codegen.test b/testdata/workloads/functional-query/queries/QueryTest/union-const-scalar-expr-codegen.test
index e8398ebac..8db968980 100644
--- a/testdata/workloads/functional-query/queries/QueryTest/union-const-scalar-expr-codegen.test
+++ b/testdata/workloads/functional-query/queries/QueryTest/union-const-scalar-expr-codegen.test
@@ -14,7 +14,6 @@ tinyint,tinyint,tinyint
00:UNION
constant-operands=3
#SORT_NODE
-ExecOption: Codegen Enabled
#UNION_NODE
ExecOption: Codegen Enabled, Codegen Disabled for const scalar expressions
====
@@ -74,7 +73,5 @@ BIGINT, BIGINT
#AGGREGATION_NODE (id=6)
ExecOption: Codegen Enabled
#UNION_NODE (id=3)
-ExecOption: Codegen Enabled
#AGGREGATION_NODE (id=5)
-ExecOption: Codegen Enabled
====
diff --git a/testdata/workloads/targeted-perf/queries/aggregation.test b/testdata/workloads/targeted-perf/queries/aggregation.test
index 9723a47b1..73ef38327 100644
--- a/testdata/workloads/targeted-perf/queries/aggregation.test
+++ b/testdata/workloads/targeted-perf/queries/aggregation.test
@@ -2726,10 +2726,10 @@ BIGINT,BIGINT,STRING
-- IMPALA-2581: LIMIT can be used to speed up aggregations
select distinct l_orderkey from lineitem limit 10;
---- RUNTIME_PROFILE
-row_regex: .*FastLimitCheckExceededRows: [1-9][0-9]*
+row_regex: FastLimitCheckExceededRows: [1-9][0-9]*
====
---- QUERY: PERF_AGG-Q12
select l_orderkey from lineitem group by 1 limit 10;
---- RUNTIME_PROFILE
-row_regex: .*FastLimitCheckExceededRows: [1-9][0-9]*
+row_regex: FastLimitCheckExceededRows: [1-9][0-9]*
====
diff --git a/testdata/workloads/tpcds/queries/unpartitioned-probe.test b/testdata/workloads/tpcds/queries/unpartitioned-probe.test
index e4b414a0a..26ea2bc9b 100644
--- a/testdata/workloads/tpcds/queries/unpartitioned-probe.test
+++ b/testdata/workloads/tpcds/queries/unpartitioned-probe.test
@@ -23,8 +23,8 @@ from
---- TYPES
TIMESTAMP, BIGINT
---- RUNTIME_PROFILE
-row_regex: F03:EXCHANGE SENDER .* 1 .* 2 .*
-row_regex: 05:HASH JOIN .* 1 .* 2 .*
-row_regex: F01:EXCHANGE SENDER .* 1 .* 1 .*
-row_regex: 04:SELECT .* 1 .* 1 .*
+row_regex: F03:EXCHANGE SENDER .* 1 .* 2
+row_regex: 05:HASH JOIN .* 1 .* 2
+row_regex: F01:EXCHANGE SENDER .* 1 .* 1
+row_regex: 04:SELECT .* 1 .* 1
====
diff --git a/testdata/workloads/tpch/queries/datastream-sender.test b/testdata/workloads/tpch/queries/datastream-sender.test
index 7656fe12a..33497d824 100644
--- a/testdata/workloads/tpch/queries/datastream-sender.test
+++ b/testdata/workloads/tpch/queries/datastream-sender.test
@@ -44,5 +44,5 @@ FROM cte1 t1
---- TYPES
BIGINT, INT, INT, INT, INT
---- RUNTIME_PROFILE
-row_regex:.*:EXCHANGE SENDER.*[0-9\.]+ MB.*
+row_regex: :EXCHANGE SENDER.*[0-9\.]+ MB
=====
diff --git a/tests/common/test_result_verifier.py b/tests/common/test_result_verifier.py
index d44b046a6..305cccb05 100644
--- a/tests/common/test_result_verifier.py
+++ b/tests/common/test_result_verifier.py
@@ -676,12 +676,12 @@ def verify_runtime_profile(expected, actual, update_section=False):
for i in range(len(expected_lines)):
if matched[i]: continue
if expected_regexes[i] is not None:
- match = expected_regexes[i].match(line)
+ match = expected_regexes[i].search(line)
elif expected_aggregations[i] is not None:
# Aggregations are enforced separately
match = True
elif unexpected_regexes[i] is not None:
- if unexpected_regexes[i].match(line):
+ if unexpected_regexes[i].search(line):
unexpected_matched_lines.append(line)
match = False
else: