This is an automated email from the ASF dual-hosted git repository.

yiguolei pushed a commit to branch branch-2.1
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-2.1 by this push:
     new c9d3f1122bd [regression-test](framework) disable defining global variable in test… (#45840)
c9d3f1122bd is described below

commit c9d3f1122bd272a79ef0fa43d1afd392d3972ed7
Author: shuke <sh...@selectdb.com>
AuthorDate: Tue Dec 24 17:21:27 2024 +0800

    [regression-test](framework) disable defining global variable in test… (#45840)
---
 .../test_uniq_vals_schema_change.out               |  3 +++
 .../doris/regression/suite/ScriptSource.groovy     |  9 ++++++++-
 .../backup_restore/test_backup_restore_db.groovy   |  2 +-
 .../test_backup_restore_exclude.groovy             |  2 +-
 .../test_backup_restore_multi_tables.groovy        |  2 +-
 ...st_backup_restore_multi_tables_overwrite.groovy |  2 +-
 .../backup_restore/test_backup_restore_mv.groovy   | 10 +++++-----
 .../test_restore_mix_exists_and_new_table.groovy   |  2 +-
 .../ccr_mow_syncer_p0/test_ingest_binlog.groovy    |  2 +-
 .../inverted_index/test_ingest_binlog.groovy       |  2 +-
 .../suites/ccr_syncer_p0/test_ingest_binlog.groovy |  2 +-
 .../auth/test_disable_revoke_admin_auth.groovy     |  1 +
 .../suites/compaction/test_full_compaction.groovy  |  2 +-
 .../correctness/test_trim_new_parameters.groovy    |  2 +-
 .../ddl_p0/test_create_table_properties.groovy     |  2 +-
 .../suites/export/test_array_export.groovy         |  6 +++---
 .../suites/export/test_map_export.groovy           |  4 ++--
 .../suites/export/test_struct_export.groovy        |  2 +-
 .../outfile/csv/test_outfile_empty_data.groovy     | 14 +++++++-------
 .../test_outfile_expr_generate_col_name.groovy     | 10 +++++-----
 .../suites/export_p0/test_export_basic.groovy      |  8 ++++----
 .../export_p0/test_outfile_file_suffix.groovy      |  2 +-
 .../export_p0/test_show_create_database.groovy     |  2 +-
 .../hive/test_autoinc_broker_load.groovy           | 22 +++++++++++-----------
 .../hive/test_hive_parquet_alter_column.groovy     |  2 +-
 .../hive/test_hive_statistic.groovy                |  2 +-
 .../hive/test_partial_update_broker_load.groovy    | 16 ++++++++--------
 .../iceberg/test_iceberg_optimize_count.groovy     | 10 +++++-----
 .../test_iceberg_predicate_conversion.groovy       |  2 +-
 .../insert_group_commit_with_exception.groovy      |  4 ++--
 .../test_add_drop_index_on_table_with_mv.groovy    |  2 +-
 .../index_change/test_pk_uk_index_change.groovy    |  2 +-
 .../test_schema_change_storage_format.groovy       |  2 +-
 .../test_array_contains_with_inverted_index.groovy |  4 ++--
 .../test_index_match_select.groovy                 |  8 ++++----
 .../test_primary_key_simple_case.groovy            |  2 +-
 .../http_stream/test_http_stream_2pc.groovy        |  8 ++++----
 .../test_insert_random_distribution_table.groovy   |  4 ++--
 .../load_p0/mysql_load/test_mysql_load.groovy      |  2 +-
 ...t_stream_load_with_nonexist_db_and_table.groovy |  6 +++---
 .../test_map_load_and_compaction.groovy            |  4 +++-
 .../load_p0/stream_load/test_stream_load.groovy    | 22 +++++++++++-----------
 .../test_stream_load_move_memtable.groovy          |  2 +-
 .../suites/manager/test_manager_interface_2.groovy |  4 ++--
 .../suites/manager/test_manager_interface_4.groovy |  2 +-
 .../suites/mtmv_p0/test_build_mtmv.groovy          |  2 +-
 .../suites/mtmv_p0/test_create_mv_mtmv.groovy      |  2 +-
 .../suites/mtmv_p0/test_create_rollup_mtmv.groovy  |  2 +-
 .../mtmv_p0/test_limit_partition_mtmv.groovy       |  2 +-
 .../mtmv_p0/test_rollup_partition_mtmv.groovy      |  4 ++--
 .../mv_p0/test_create_mv/test_create_mv.groovy     |  2 +-
 .../nereids_arith_p0/topn/accept_null.groovy       |  4 ++--
 .../cache/parse_sql_from_sql_cache.groovy          |  2 +-
 .../fold_constant/fold_constant_by_fe.groovy       |  2 +-
 .../suites/nereids_p0/outfile/test_outfile.groovy  | 12 +++++++-----
 .../nereids_p0/outfile/test_outfile_expr.groovy    |  2 +-
 .../nereids_p0/outfile/test_outfile_parquet.groovy |  8 +++++---
 ...st_date_or_datetime_computation_negative.groovy |  1 -
 .../mv/dimension/dimension_self_conn.groovy        |  2 +-
 .../suites/nereids_syntax_p0/group_bit.groovy      |  2 +-
 .../suites/nereids_syntax_p0/rollup/bitmap.groovy  |  2 +-
 .../auto_partition/test_auto_partition_load.groovy |  4 ++--
 .../test_auto_range_partition.groovy               |  4 ++--
 ...t_dynamic_partition_mod_distribution_key.groovy |  2 +-
 .../test_dynamic_partition_with_alter.groovy       |  2 +-
 .../suites/query_p0/sort/topn_2pr_rule.groovy      |  2 +-
 .../suites/query_profile/test_profile.groovy       |  2 +-
 .../test_agg_keys_schema_change.groovy             |  8 ++++----
 .../test_agg_mv_schema_change.groovy               |  8 ++++----
 .../test_agg_rollup_schema_change.groovy           | 10 +++++-----
 .../test_agg_schema_key_change_modify1.groovy      |  6 +++---
 .../test_agg_vals_schema_change.groovy             |  4 ++--
 .../schema_change_p0/test_alter_uniq_null.groovy   |  2 +-
 .../test_dup_keys_schema_change.groovy             |  8 ++++----
 .../test_dup_mv_schema_change.groovy               |  4 ++--
 .../test_dup_rollup_schema_change.groovy           |  6 +++---
 .../test_dup_schema_key_change_modify1.groovy      |  6 +++---
 .../test_dup_vals_schema_change.groovy             |  4 ++--
 .../test_enable_light_schema_change.groovy         |  2 +-
 .../test_uniq_keys_schema_change.groovy            |  4 ++--
 .../test_uniq_mv_schema_change.groovy              |  4 ++--
 .../test_uniq_rollup_schema_change.groovy          |  4 ++--
 .../test_uniq_vals_schema_change.groovy            |  8 ++++----
 .../test_varchar_schema_change.groovy              |  4 ++--
 .../suites/show_p0/test_show_data.groovy           |  2 +-
 .../suites/statistics/analyze_stats.groovy         |  6 +++---
 ...partial_update_delete_sign_with_conflict.groovy |  6 +++---
 .../test_partial_update_parallel.groovy            |  6 +++---
 .../suites/update/test_update_configs.groovy       |  2 +-
 .../suites/variant_p0/delete_update.groovy         |  6 +++---
 .../variant_p0/schema_change/schema_change.groovy  |  2 +-
 .../workload_manager_p0/test_resource_tag.groovy   | 10 +++++-----
 92 files changed, 220 insertions(+), 204 deletions(-)

diff --git a/regression-test/data/schema_change_p0/test_uniq_vals_schema_change.out b/regression-test/data/schema_change_p0/test_uniq_vals_schema_change.out
index de0526b3e73..22d1644a86b 100644
--- a/regression-test/data/schema_change_p0/test_uniq_vals_schema_change.out
+++ b/regression-test/data/schema_change_p0/test_uniq_vals_schema_change.out
@@ -11,6 +11,9 @@
 -- !sc --
 3
 
+-- !sc --
+3      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
+
 -- !sc --
 4      2017-10-01      Beijing 10      1       2020-01-03T00:00        2020-01-03T00:00        1       32      20      2
 
diff --git a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/ScriptSource.groovy b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/ScriptSource.groovy
index d73cf6afaf6..97d935c58ae 100644
--- a/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/ScriptSource.groovy
+++ b/regression-test/framework/src/main/groovy/org/apache/doris/regression/suite/ScriptSource.groovy
@@ -34,7 +34,14 @@ class GroovyFileSource implements ScriptSource {
 
     @Override
     SuiteScript toScript(ScriptContext scriptContext, GroovyShell shell) {
-        SuiteScript suiteScript = shell.parse(file) as SuiteScript
+        def setPropertyFunction = '''
+\nvoid setProperty(String key, value) {
+    throw new IllegalArgumentException("defined global variables in script are not allowed: ${key}")
+}
+'''
+        def scriptContent = file.text
+        scriptContent = scriptContent + setPropertyFunction
+        SuiteScript suiteScript = shell.parse(scriptContent, file.getName()) as SuiteScript
         suiteScript.init(scriptContext)
         return suiteScript
     }
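
For context on the hunk above: in a Groovy script, assigning to a name never declared with `def` or a type is dispatched to the script object's setProperty(), which by default writes the value into the shared Binding. Appending an overriding setProperty to every suite's source therefore turns any accidental global into an immediate failure. A minimal, self-contained sketch of the mechanism (script body and file name are illustrative, not from the patch):

    import groovy.lang.GroovyShell

    def shell = new GroovyShell()
    def body = 'x = 1   // an undeclared assignment, routed through setProperty()'
    def guard = '''
    void setProperty(String key, value) {
        throw new IllegalArgumentException("defined global variables in script are not allowed: ${key}")
    }
    '''
    try {
        // Append the guard to the script text, exactly as toScript() now does.
        shell.parse(body + guard, 'example.groovy').run()
    } catch (IllegalArgumentException e) {
        assert e.message.endsWith(': x')   // the guard names the offending variable
    }

Passing file.getName() to shell.parse() keeps the original file name, so stack traces and logs still point at the real suite file.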
diff --git a/regression-test/suites/backup_restore/test_backup_restore_db.groovy b/regression-test/suites/backup_restore/test_backup_restore_db.groovy
index 08b9f619d4f..2cca1356dc5 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_db.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_db.groovy
@@ -81,7 +81,7 @@ suite("test_backup_restore_db", "backup_restore") {
     syncer.waitAllRestoreFinish(dbName)
 
     for (def tableName in tables) {
-        result = sql "SELECT * FROM ${dbName}.${tableName}"
+        def result = sql "SELECT * FROM ${dbName}.${tableName}"
         assertEquals(result.size(), numRows);
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
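
This one-line change is the pattern repeated across most of the suites below: adding `def` makes `result` an ordinary local variable instead of an undeclared write to the script Binding. A hedged illustration of the difference (names are illustrative):

    def local = 42     // declared local: unaffected by the new guard
    // shared = 42     // undeclared: dispatched to setProperty() and now rejected
    assert local == 42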
diff --git a/regression-test/suites/backup_restore/test_backup_restore_exclude.groovy b/regression-test/suites/backup_restore/test_backup_restore_exclude.groovy
index e29ebfe3b6b..6993a0ca6fa 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_exclude.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_exclude.groovy
@@ -88,7 +88,7 @@ suite("test_backup_restore_exclude", "backup_restore") {
 
     qt_select "SELECT * FROM ${dbName}.${backupExcludeTable} ORDER BY id"
     for (def tableName in tables) {
-        result = sql "SELECT * FROM ${dbName}.${tableName}"
+        def result = sql "SELECT * FROM ${dbName}.${tableName}"
         assertEquals(result.size(), numRows);
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
diff --git a/regression-test/suites/backup_restore/test_backup_restore_multi_tables.groovy b/regression-test/suites/backup_restore/test_backup_restore_multi_tables.groovy
index 4fa274bbae3..2be39d8eef1 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_multi_tables.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_multi_tables.groovy
@@ -84,7 +84,7 @@ suite("test_backup_restore_multi_tables", "backup_restore") {
     syncer.waitAllRestoreFinish(dbName)
 
     for (def tableName in tables) {
-        result = sql "SELECT * FROM ${dbName}.${tableName}"
+        def result = sql "SELECT * FROM ${dbName}.${tableName}"
         assertEquals(result.size(), numRows);
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
diff --git a/regression-test/suites/backup_restore/test_backup_restore_multi_tables_overwrite.groovy b/regression-test/suites/backup_restore/test_backup_restore_multi_tables_overwrite.groovy
index 30dc63dd195..9ee5c848aef 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_multi_tables_overwrite.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_multi_tables_overwrite.groovy
@@ -86,7 +86,7 @@ suite("test_backup_restore_multi_tables_overwrite", "backup_restore") {
 
     qt_select "SELECT * FROM ${dbName}.${firstTableName} ORDER BY id"
     for (def tableName in tables) {
-        result = sql "SELECT * FROM ${dbName}.${tableName}"
+        def result = sql "SELECT * FROM ${dbName}.${tableName}"
         assertEquals(result.size(), numRows);
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
diff --git a/regression-test/suites/backup_restore/test_backup_restore_mv.groovy b/regression-test/suites/backup_restore/test_backup_restore_mv.groovy
index 0a54aa76e17..398b51cc995 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_mv.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_mv.groovy
@@ -57,13 +57,13 @@ suite("test_backup_restore_mv", "backup_restore") {
     """
 
     def alter_finished = false
-    for (i = 0; i < 60 && !alter_finished; i++) {
+    for (int i = 0; i < 60 && !alter_finished; i++) {
         result = sql_return_maparray "SHOW ALTER TABLE MATERIALIZED VIEW FROM ${dbName}"
         logger.info("result: ${result}")
-        for (int i = 0; i < result.size(); i++) {
-            if (result[i]['TableName'] == "${tableName}" &&
-                result[i]['RollupIndexName'] == "${mvName}" &&
-                result[i]['State'] == 'FINISHED') {
+        for (int j = 0; j < result.size(); j++) {
+            if (result[j]['TableName'] == "${tableName}" &&
+                result[j]['RollupIndexName'] == "${mvName}" &&
+                result[j]['State'] == 'FINISHED') {
                 alter_finished = true
                 break
             }
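
A note on this hunk: besides the missing type on the outer index, the inner loop previously re-declared `int i`, shadowing the outer undeclared `i`. Typing the outer index and renaming the inner one to `j` fixes both the Binding write and the shadowing. A sketch with stand-in data:

    def rows = [[State: 'RUNNING'], [State: 'FINISHED']]   // stand-in for SHOW ALTER output
    def alterFinished = false
    for (int i = 0; i < 60 && !alterFinished; i++) {       // typed outer index
        for (int j = 0; j < rows.size(); j++) {            // distinct, typed inner index
            if (rows[j].State == 'FINISHED') { alterFinished = true; break }
        }
    }
    assert alterFinished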
diff --git a/regression-test/suites/backup_restore/test_restore_mix_exists_and_new_table.groovy b/regression-test/suites/backup_restore/test_restore_mix_exists_and_new_table.groovy
index aea46af179d..b1cc9136b25 100644
--- a/regression-test/suites/backup_restore/test_restore_mix_exists_and_new_table.groovy
+++ b/regression-test/suites/backup_restore/test_restore_mix_exists_and_new_table.groovy
@@ -84,7 +84,7 @@ suite("test_restore_mix_exists_and_new_table", "backup_restore") {
     syncer.waitAllRestoreFinish(dbName)
 
     for (def tableName in tables) {
-        result = sql "SELECT * FROM ${dbName}.${tableName}"
+        def result = sql "SELECT * FROM ${dbName}.${tableName}"
         assertEquals(result.size(), numRows);
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
diff --git a/regression-test/suites/ccr_mow_syncer_p0/test_ingest_binlog.groovy b/regression-test/suites/ccr_mow_syncer_p0/test_ingest_binlog.groovy
index e07529718ee..16ce1d43c4f 100644
--- a/regression-test/suites/ccr_mow_syncer_p0/test_ingest_binlog.groovy
+++ b/regression-test/suites/ccr_mow_syncer_p0/test_ingest_binlog.groovy
@@ -78,7 +78,7 @@ suite("test_mow_ingest_binlog") {
     }
 
     target_sql " sync "
-    res = target_sql """SELECT * FROM ${tableName} WHERE test=${test_num}"""
+    def res = target_sql """SELECT * FROM ${tableName} WHERE test=${test_num}"""
     assertEquals(res.size(), insert_num)
 
 
diff --git a/regression-test/suites/ccr_syncer_p0/inverted_index/test_ingest_binlog.groovy b/regression-test/suites/ccr_syncer_p0/inverted_index/test_ingest_binlog.groovy
index 12ba49e084d..b46d256a52f 100644
--- a/regression-test/suites/ccr_syncer_p0/inverted_index/test_ingest_binlog.groovy
+++ b/regression-test/suites/ccr_syncer_p0/inverted_index/test_ingest_binlog.groovy
@@ -152,7 +152,7 @@ suite("test_ingest_binlog_index") {
         }
 
         target_sql " sync "
-        res = target_sql """SELECT * FROM ${tableName}"""
+        def res = target_sql """SELECT * FROM ${tableName}"""
         if (tableName.contains("mow")) {
             assertEquals(res.size(), insert_data(tableName).size() / 2 as Integer)
         } else {
diff --git a/regression-test/suites/ccr_syncer_p0/test_ingest_binlog.groovy b/regression-test/suites/ccr_syncer_p0/test_ingest_binlog.groovy
index 9f176cac9e0..414621b5086 100644
--- a/regression-test/suites/ccr_syncer_p0/test_ingest_binlog.groovy
+++ b/regression-test/suites/ccr_syncer_p0/test_ingest_binlog.groovy
@@ -77,7 +77,7 @@ suite("test_ingest_binlog") {
     }
 
     target_sql " sync "
-    res = target_sql """SELECT * FROM ${tableName} WHERE test=${test_num}"""
+    def res = target_sql """SELECT * FROM ${tableName} WHERE test=${test_num}"""
     assertEquals(res.size(), insert_num)
 
 
diff --git a/regression-test/suites/cloud_p0/auth/test_disable_revoke_admin_auth.groovy b/regression-test/suites/cloud_p0/auth/test_disable_revoke_admin_auth.groovy
index 19add1d2c87..bc1767a4ed4 100644
--- a/regression-test/suites/cloud_p0/auth/test_disable_revoke_admin_auth.groovy
+++ b/regression-test/suites/cloud_p0/auth/test_disable_revoke_admin_auth.groovy
@@ -22,6 +22,7 @@ suite("test_disable_revoke_admin_auth", "cloud_auth") {
     sql """create user ${user} identified by 'Cloud12345' default role 
'admin'"""
 
     sql "sync"
+    def result
 
     try {
         result = sql """revoke 'admin' from 'admin'""";
diff --git a/regression-test/suites/compaction/test_full_compaction.groovy b/regression-test/suites/compaction/test_full_compaction.groovy
index 0aaeff085b9..34742d41bf2 100644
--- a/regression-test/suites/compaction/test_full_compaction.groovy
+++ b/regression-test/suites/compaction/test_full_compaction.groovy
@@ -120,7 +120,7 @@ suite("test_full_compaction") {
         for (def tablet in tablets) {
             String tablet_id = tablet.TabletId
             backend_id = tablet.BackendId
-            times = 1
+            def times = 1
 
             do{
                (code, out, err) = be_run_full_compaction(backendId_to_backendIP.get(backend_id), backendId_to_backendHttpPort.get(backend_id), tablet_id)
diff --git a/regression-test/suites/correctness/test_trim_new_parameters.groovy b/regression-test/suites/correctness/test_trim_new_parameters.groovy
index 17ac4a0c65e..a76ccb96356 100644
--- a/regression-test/suites/correctness/test_trim_new_parameters.groovy
+++ b/regression-test/suites/correctness/test_trim_new_parameters.groovy
@@ -68,6 +68,6 @@ suite("test_trim_new_parameters") {
     rtrim = sql "select rtrim('bcTTTabcabc','abc')"
     assertEquals(rtrim[0][0], 'bcTTT')   
 
-    trim_one = sql "select 
trim('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaabcTTTabcabcaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','a')"
+    def trim_one = sql "select 
trim('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaabcTTTabcabcaaaaaaaaaaaaaaaaaaaaaaaaaabaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa','a')"
     assertEquals(trim_one[0][0], 
'baaaaaaaaaaabcTTTabcabcaaaaaaaaaaaaaaaaaaaaaaaaaab')  
 }
diff --git a/regression-test/suites/ddl_p0/test_create_table_properties.groovy b/regression-test/suites/ddl_p0/test_create_table_properties.groovy
index c6908d80b07..32fd0cabcaf 100644
--- a/regression-test/suites/ddl_p0/test_create_table_properties.groovy
+++ b/regression-test/suites/ddl_p0/test_create_table_properties.groovy
@@ -336,7 +336,7 @@ suite("test_create_table_properties") {
         )
     """
     sql """ insert into ${bool_tab} values (1, '2020-12-12 12:12:12', 
'2000-01-01 12:12:12.123456'), (0, '20201212 121212', '2000-01-01'), (1, 
'20201212121212', '2000-01-01'), (0, 'AaA', '2000-01-01') """
-    result = sql "show partitions from ${bool_tab}"
+    def result = sql "show partitions from ${bool_tab}"
     logger.info("${result}")
     assertEquals(result.size(), 2)
     
diff --git a/regression-test/suites/export/test_array_export.groovy b/regression-test/suites/export/test_array_export.groovy
index 4186ee21d9f..4ce9786e997 100644
--- a/regression-test/suites/export/test_array_export.groovy
+++ b/regression-test/suites/export/test_array_export.groovy
@@ -136,7 +136,7 @@ suite("test_array_export", "export") {
     def check_export_result = {checklabel->
         max_try_milli_secs = 15000
         while(max_try_milli_secs) {
-            result = sql "show export where label='${checklabel}'"
+            def result = sql "show export where label='${checklabel}'"
             if(result[0][2] == "FINISHED") {
                 break
             } else {
@@ -171,7 +171,7 @@ suite("test_array_export", "export") {
         } else {
             throw new IllegalStateException("""${outFilePath} already exists! """)
         }
-        result = sql """
+        def result = sql """
             SELECT * FROM ${tableName} t ORDER BY k1 INTO OUTFILE "file://${outFile}/";
         """
         def url = result[0][3]
@@ -203,7 +203,7 @@ suite("test_array_export", "export") {
             path.delete();
         }
         if (csvFiles != "") {
-            cmd = "rm -rf ${csvFiles}"
+            def cmd = "rm -rf ${csvFiles}"
             sshExec("root", urlHost, cmd)
         }
     }
diff --git a/regression-test/suites/export/test_map_export.groovy b/regression-test/suites/export/test_map_export.groovy
index fdb207f8628..101ab07ebbe 100644
--- a/regression-test/suites/export/test_map_export.groovy
+++ b/regression-test/suites/export/test_map_export.groovy
@@ -98,7 +98,7 @@ suite("test_map_export", "export") {
         def result = sql """
                     SELECT * FROM ${testTable} ORDER BY id INTO OUTFILE "file://${outFile}/";
         """
-        url = result[0][3]
+        def url = result[0][3]
         urlHost = url.substring(8, url.indexOf("${outFile}"))
         if (backends.size() > 1) {
             // custer will scp files
@@ -146,7 +146,7 @@ suite("test_map_export", "export") {
             path.delete();
         }
         if (csvFiles != "") {
-            cmd = "rm -rf ${csvFiles}"
+            def cmd = "rm -rf ${csvFiles}"
             sshExec("root", urlHost, cmd)
         }
     }
diff --git a/regression-test/suites/export/test_struct_export.groovy b/regression-test/suites/export/test_struct_export.groovy
index 8df3a40906e..bc3912af39e 100644
--- a/regression-test/suites/export/test_struct_export.groovy
+++ b/regression-test/suites/export/test_struct_export.groovy
@@ -151,7 +151,7 @@ suite("test_struct_export", "export") {
             path.delete();
         }
         if (csvFiles != "") {
-            cmd = "rm -rf ${csvFiles}"
+            def cmd = "rm -rf ${csvFiles}"
             sshExec("root", urlHost, cmd)
         }
     }
diff --git a/regression-test/suites/export_p0/outfile/csv/test_outfile_empty_data.groovy b/regression-test/suites/export_p0/outfile/csv/test_outfile_empty_data.groovy
index d8da398ea81..d14dc119ddb 100644
--- a/regression-test/suites/export_p0/outfile/csv/test_outfile_empty_data.groovy
+++ b/regression-test/suites/export_p0/outfile/csv/test_outfile_empty_data.groovy
@@ -45,7 +45,7 @@ suite("test_outfile_empty_data", "external,hive,tvf,external_docker") {
     String ak = getS3AK()
     String sk = getS3SK()
     String s3_endpoint = getS3Endpoint()
-    String region = region = getS3Region()
+    String region = getS3Region()
     String bucket = context.config.otherConfigs.get("s3BucketName");
 
     // broker
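
One fix in the hunk above is worth calling out: `String region = region = getS3Region()` was a typo'd chained assignment that assigned the value twice; the patch reduces it to a single declaration. A sketch with a stub standing in for the suite helper of the same name:

    def getS3Region() { 'ap-example-1' }   // stub; the real helper comes from the test framework
    String region = getS3Region()          // the fixed, single assignment
    assert region == 'ap-example-1'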
@@ -67,8 +67,8 @@ suite("test_outfile_empty_data", "external,hive,tvf,external_docker") {
         // select ... into outfile ...
         def uuid = UUID.randomUUID().toString()
 
-        hdfs_outfile_path = "/user/doris/tmp_data/${uuid}"
-        uri = "${defaultFS}" + "${hdfs_outfile_path}/exp_"
+        def hdfs_outfile_path = "/user/doris/tmp_data/${uuid}"
+        def uri = "${defaultFS}" + "${hdfs_outfile_path}/exp_"
 
         def res = sql """
             SELECT * FROM ${export_table_name} t ORDER BY user_id
@@ -87,8 +87,8 @@ suite("test_outfile_empty_data", "external,hive,tvf,external_docker") {
         // select ... into outfile ...
         def uuid = UUID.randomUUID().toString()
 
-        hdfs_outfile_path = "/user/doris/tmp_data/${uuid}"
-        uri = "${defaultFS}" + "${hdfs_outfile_path}/exp_"
+        def hdfs_outfile_path = "/user/doris/tmp_data/${uuid}"
+        def uri = "${defaultFS}" + "${hdfs_outfile_path}/exp_"
 
         def res = sql """
             SELECT * FROM ${export_table_name} t ORDER BY user_id
@@ -106,8 +106,8 @@ suite("test_outfile_empty_data", "external,hive,tvf,external_docker") {
 
     def outfile_to_S3_directly = {
         // select ... into outfile ...
-        s3_outfile_path = "${bucket}/outfile/csv/test-outfile-empty/"
-        uri = "s3://${s3_outfile_path}/exp_"
+        def s3_outfile_path = "${bucket}/outfile/csv/test-outfile-empty/"
+        def uri = "s3://${s3_outfile_path}/exp_"
 
         def res = sql """
             SELECT * FROM ${export_table_name} t ORDER BY user_id
diff --git a/regression-test/suites/export_p0/outfile/outfile_expr/test_outfile_expr_generate_col_name.groovy b/regression-test/suites/export_p0/outfile/outfile_expr/test_outfile_expr_generate_col_name.groovy
index e5a9ec7ea49..1d31c64ba75 100644
--- a/regression-test/suites/export_p0/outfile/outfile_expr/test_outfile_expr_generate_col_name.groovy
+++ b/regression-test/suites/export_p0/outfile/outfile_expr/test_outfile_expr_generate_col_name.groovy
@@ -129,7 +129,7 @@ suite("test_outfile_expr_generate_col_name", "p0") {
                 "s3.access_key" = "${ak}"
             );
         """
-        outfile_url = res[0][3]
+        def outfile_url = res[0][3]
         
         check_outfile_data(outfile_url, outfile_format)
         check_outfile_column_name(outfile_url, outfile_format)
@@ -150,7 +150,7 @@ suite("test_outfile_expr_generate_col_name", "p0") {
                 "s3.access_key" = "${ak}"
             );
         """
-        outfile_url = res[0][3]
+        def outfile_url = res[0][3]
         
         check_outfile_data(outfile_url, outfile_format)
         check_outfile_column_name(outfile_url, outfile_format)
@@ -171,7 +171,7 @@ suite("test_outfile_expr_generate_col_name", "p0") {
                 "s3.access_key" = "${ak}"
             );
         """
-        outfile_url = res[0][3]
+        def outfile_url = res[0][3]
         
         check_outfile_data(outfile_url, outfile_format)
         check_outfile_column_name(outfile_url, outfile_format)
@@ -211,7 +211,7 @@ suite("test_outfile_expr_generate_col_name", "p0") {
                 "s3.access_key" = "${ak}"
             );
         """
-        outfile_url = res[0][3]
+        def outfile_url = res[0][3]
         
         check_outfile_data(outfile_url, outfile_format)
         check_outfile_column_name(outfile_url, outfile_format)
@@ -235,7 +235,7 @@ suite("test_outfile_expr_generate_col_name", "p0") {
                 "s3.access_key" = "${ak}"
             );
         """
-        outfile_url = res[0][3]
+        def outfile_url = res[0][3]
         
         check_outfile_data(outfile_url, outfile_format)
         check_outfile_column_name(outfile_url, outfile_format)
diff --git a/regression-test/suites/export_p0/test_export_basic.groovy b/regression-test/suites/export_p0/test_export_basic.groovy
index ca838232e3f..152f1ab4e6e 100644
--- a/regression-test/suites/export_p0/test_export_basic.groovy
+++ b/regression-test/suites/export_p0/test_export_basic.groovy
@@ -414,11 +414,11 @@ suite("test_export_basic", "p0") {
     }
 
     // 5. test order by and limit clause
-    uuid1 = UUID.randomUUID().toString()
+    def uuid1 = UUID.randomUUID().toString()
     outFilePath = """${outfile_path_prefix}_${uuid1}"""
-    label1 = "label_${uuid1}"
-    uuid2 = UUID.randomUUID().toString()
-    label2 = "label_${uuid2}"
+    def label1 = "label_${uuid1}"
+    def uuid2 = UUID.randomUUID().toString()
+    def label2 = "label_${uuid2}"
     try {
         // check export path
         check_path_exists.call("${outFilePath}")
diff --git a/regression-test/suites/export_p0/test_outfile_file_suffix.groovy b/regression-test/suites/export_p0/test_outfile_file_suffix.groovy
index 0123b6ee4cf..97ab61446b1 100644
--- a/regression-test/suites/export_p0/test_outfile_file_suffix.groovy
+++ b/regression-test/suites/export_p0/test_outfile_file_suffix.groovy
@@ -42,7 +42,7 @@ suite("test_outfile_file_suffix", "p0") {
 
     def outFilePath = """s3://${bucket}/outfile_"""
     def csv_suffix_result = { file_suffix, file_format ->
-        result = sql """
+        def result = sql """
                 select * from ${table_name}
                 into outfile "${outFilePath}"
                 FORMAT AS ${file_format}
diff --git a/regression-test/suites/export_p0/test_show_create_database.groovy b/regression-test/suites/export_p0/test_show_create_database.groovy
index 03f0e8b291c..44e54c46028 100644
--- a/regression-test/suites/export_p0/test_show_create_database.groovy
+++ b/regression-test/suites/export_p0/test_show_create_database.groovy
@@ -18,7 +18,7 @@
 suite("test_show_create_database", 
"p0,external,hive,external_docker,external_docker_hive") {
 
     sql """create database if not exists db_test"""
-    result = sql """show create database db_test"""
+    def result = sql """show create database db_test"""
     assertEquals(result.size(), 1)
     assertEquals(result[0][1], "CREATE DATABASE `db_test`")
 
diff --git a/regression-test/suites/external_table_p0/hive/test_autoinc_broker_load.groovy b/regression-test/suites/external_table_p0/hive/test_autoinc_broker_load.groovy
index 3171c72b710..376da31c8a7 100644
--- a/regression-test/suites/external_table_p0/hive/test_autoinc_broker_load.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_autoinc_broker_load.groovy
@@ -20,15 +20,15 @@ suite("test_autoinc_broker_load", "p0,external,hive,external_docker,external_doc
 
     String enabled = context.config.otherConfigs.get("enableHiveTest")
     if (enabled != null && enabled.equalsIgnoreCase("true")) {
-        brokerName = getBrokerName()
-        hdfsUser = getHdfsUser()
-        hdfsPasswd = getHdfsPasswd()
-        hdfs_port = context.config.otherConfigs.get("hive2HdfsPort")
-        externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+        def brokerName = getBrokerName()
+        def hdfsUser = getHdfsUser()
+        def hdfsPasswd = getHdfsPasswd()
+        def hdfs_port = context.config.otherConfigs.get("hive2HdfsPort")
+        def externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
 
         def test_dir = "user/doris/preinstalled_data/data_case/autoinc"
 
-        def load_from_hdfs = {columns, testTable, label, testFile, format, brokerName, hdfsUser, hdfsPasswd ->
+        def load_from_hdfs = {columns, testTable, label, testFile, format ->
             def result1= sql """ LOAD LABEL ${label} (
                                DATA INFILE("hdfs://${externalEnvIp}:${hdfs_port}/${test_dir}/${testFile}")
                                 INTO TABLE ${testTable}
@@ -46,9 +46,9 @@ suite("test_autoinc_broker_load", "p0,external,hive,external_docker,external_doc
         }
 
         def wait_for_load_result = {checklabel, testTable ->
-            max_try_milli_secs = 10000
+            def max_try_milli_secs = 10000
             while(max_try_milli_secs) {
-                result = sql "show load where label = '${checklabel}'"
+                def result = sql "show load where label = '${checklabel}'"
                 if(result[0][2] == "FINISHED") {
                     break
                 } else {
@@ -61,7 +61,7 @@ suite("test_autoinc_broker_load", "p0,external,hive,external_docker,external_doc
             }
         }
 
-        table = "test_autoinc_broker_load"
+        def table = "test_autoinc_broker_load"
         sql "drop table if exists ${table}"
         sql """ CREATE TABLE IF NOT EXISTS `${table}` (
             `id` BIGINT NOT NULL AUTO_INCREMENT COMMENT "用户 ID",
@@ -78,7 +78,7 @@ suite("test_autoinc_broker_load", "p0,external,hive,external_docker,external_doc
             "enable_unique_key_merge_on_write" = "true") """
         
         def test_load_label = UUID.randomUUID().toString().replaceAll("-", "")
-        load_from_hdfs("name, value", table, test_load_label, 
"auto_inc_basic.csv", "csv", brokerName, hdfsUser, hdfsPasswd)
+        load_from_hdfs("name, value", table, test_load_label, 
"auto_inc_basic.csv", "csv")
         wait_for_load_result(test_load_label, table)
         qt_sql "select * from ${table};"
         sql """ insert into ${table} values(0, "Bob", 123), (2, "Tom", 323), 
(4, "Carter", 523);"""
@@ -102,7 +102,7 @@ suite("test_autoinc_broker_load", 
"p0,external,hive,external_docker,external_doc
             "storage_format" = "V2",
             "enable_unique_key_merge_on_write" = "true");"""
         test_load_label = UUID.randomUUID().toString().replaceAll("-", "")
-        load_from_hdfs("id, name, value", table, test_load_label, 
"auto_inc_with_null.csv", "csv", brokerName, hdfsUser, hdfsPasswd)
+        load_from_hdfs("id, name, value", table, test_load_label, 
"auto_inc_with_null.csv", "csv")
         wait_for_load_result(test_load_label, table)
         sql "sync"
         qt_sql "select * from ${table};"
diff --git a/regression-test/suites/external_table_p0/hive/test_hive_parquet_alter_column.groovy b/regression-test/suites/external_table_p0/hive/test_hive_parquet_alter_column.groovy
index 9020b99c183..5e3afa8d7fb 100644
--- a/regression-test/suites/external_table_p0/hive/test_hive_parquet_alter_column.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_hive_parquet_alter_column.groovy
@@ -43,7 +43,7 @@ suite("test_hive_parquet_alter_column", "p0,external,hive,external_docker,extern
 
 
 
-        types = ["int","smallint","tinyint","bigint","float","double","boolean","string","char","varchar","date","timestamp","decimal"]
+        def types = ["int","smallint","tinyint","bigint","float","double","boolean","string","char","varchar","date","timestamp","decimal"]
 
         for( String type1 in types) {
             qt_desc """ desc parquet_alter_column_to_${type1} ; """
diff --git a/regression-test/suites/external_table_p0/hive/test_hive_statistic.groovy b/regression-test/suites/external_table_p0/hive/test_hive_statistic.groovy
index 5da056ff00e..971e6bc0253 100644
--- a/regression-test/suites/external_table_p0/hive/test_hive_statistic.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_hive_statistic.groovy
@@ -259,7 +259,7 @@ suite("test_hive_statistic", "p0,external,hive,external_docker,external_docker_h
         def ctlId
         def dbId
         def tblId
-        result = sql """show catalogs"""
+        def result = sql """show catalogs"""
 
         for (int i = 0; i < result.size(); i++) {
             if (result[i][1] == catalog_name) {
diff --git a/regression-test/suites/external_table_p0/hive/test_partial_update_broker_load.groovy b/regression-test/suites/external_table_p0/hive/test_partial_update_broker_load.groovy
index 8e95484b1df..f9b31a3d8a8 100644
--- a/regression-test/suites/external_table_p0/hive/test_partial_update_broker_load.groovy
+++ b/regression-test/suites/external_table_p0/hive/test_partial_update_broker_load.groovy
@@ -20,12 +20,12 @@ suite("test_primary_key_partial_update_broker_load", "p0,external,hive,external_
 
     String enabled = context.config.otherConfigs.get("enableHiveTest")
     if (enabled != null && enabled.equalsIgnoreCase("true")) {
-        brokerName = getBrokerName()
-        hdfsUser = getHdfsUser()
-        hdfsPasswd = getHdfsPasswd()
-        hdfs_port = context.config.otherConfigs.get("hive2HdfsPort")
-        externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
-        def load_from_hdfs = {testTable, label, hdfsFilePath, format, brokerName, hdfsUser, hdfsPasswd ->
+        def brokerName = getBrokerName()
+        def hdfsUser = getHdfsUser()
+        def hdfsPasswd = getHdfsPasswd()
+        def hdfs_port = context.config.otherConfigs.get("hive2HdfsPort")
+        def externalEnvIp = context.config.otherConfigs.get("externalEnvIp")
+        def load_from_hdfs = {testTable, label, hdfsFilePath, format ->
             def result1= sql """
                             LOAD LABEL ${label} (
                                 DATA INFILE("${hdfsFilePath}")
@@ -77,13 +77,13 @@ suite("test_primary_key_partial_update_broker_load", "p0,external,hive,external_
         sql """insert into ${tableName} values(2, "bob", 2000, 223, 2),(1, "alice", 1000, 123, 1),(3, "tom", 3000, 323, 3);"""
         qt_sql """ select * from ${tableName} order by id; """
         def test_load_label = UUID.randomUUID().toString().replaceAll("-", "")
-        load_from_hdfs(tableName, test_load_label, "hdfs://${externalEnvIp}:${hdfs_port}/user/doris/preinstalled_data/data_case/partial_update/update.csv", "csv", brokerName, hdfsUser, hdfsPasswd)
+        load_from_hdfs(tableName, test_load_label, "hdfs://${externalEnvIp}:${hdfs_port}/user/doris/preinstalled_data/data_case/partial_update/update.csv", "csv")
         wait_for_load_result(test_load_label, tableName)
         qt_sql """select * from ${tableName} order by id;"""
 
         sql "sync;"
         def test_load_label2 = UUID.randomUUID().toString().replaceAll("-", "")
-        load_from_hdfs(tableName, test_load_label2, "hdfs://${externalEnvIp}:${hdfs_port}/user/doris/preinstalled_data/data_case/partial_update/update2.csv", "csv", brokerName, hdfsUser, hdfsPasswd)
+        load_from_hdfs(tableName, test_load_label2, "hdfs://${externalEnvIp}:${hdfs_port}/user/doris/preinstalled_data/data_case/partial_update/update2.csv", "csv")
         wait_for_load_result(test_load_label2, tableName)
         qt_sql """select * from ${tableName} order by id;"""
         sql "drop table if exists ${tableName};"
diff --git a/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy b/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
index 31235b5278f..ba8e8e7d8ec 100644
--- a/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
+++ b/regression-test/suites/external_table_p0/iceberg/test_iceberg_optimize_count.groovy
@@ -43,10 +43,10 @@ suite("test_iceberg_optimize_count", "p0,external,doris,external_docker,external
         sql """ switch ${catalog_name} """
         sql """ use format_v2 """
 
-        sqlstr1 = """ select count(*) from sample_cow_orc; """
-        sqlstr2 = """ select count(*) from sample_cow_parquet; """
-        sqlstr3 = """ select count(*) from sample_mor_orc; """
-        sqlstr4 = """ select count(*) from sample_mor_parquet; """
+        def sqlstr1 = """ select count(*) from sample_cow_orc; """
+        def sqlstr2 = """ select count(*) from sample_cow_parquet; """
+        def sqlstr3 = """ select count(*) from sample_mor_orc; """
+        def sqlstr4 = """ select count(*) from sample_mor_parquet; """
 
         // use push down count
         sql """ set enable_count_push_down_for_external_table=true; """
@@ -100,7 +100,7 @@ suite("test_iceberg_optimize_count", "p0,external,doris,external_docker,external
 
         // There has `dangling delete` after rewrite
         sql """ set enable_count_push_down_for_external_table=true; """
-        sqlstr5 = """ select count(*) from 
${catalog_name}.test_db.dangling_delete_after_write; """
+        def sqlstr5 = """ select count(*) from 
${catalog_name}.test_db.dangling_delete_after_write; """
 
         qt_q09 """${sqlstr5}""" 
 
diff --git a/regression-test/suites/external_table_p0/iceberg/test_iceberg_predicate_conversion.groovy b/regression-test/suites/external_table_p0/iceberg/test_iceberg_predicate_conversion.groovy
index bbca6d8f023..c1e2a792e9f 100644
--- a/regression-test/suites/external_table_p0/iceberg/test_iceberg_predicate_conversion.groovy
+++ b/regression-test/suites/external_table_p0/iceberg/test_iceberg_predicate_conversion.groovy
@@ -70,7 +70,7 @@ suite("test_iceberg_predicate_conversion", "p0,external,doris,external_docker,ex
         }
 
         sqlstr = """select l_shipdate, l_shipmode from tb_predict where 
l_shipdate in ("1997-05-18", "1996-05-06") or NOT(l_shipmode = "MAIL") order by 
l_shipdate, l_shipmode limit 10"""
-        plan = """(ref(name="l_shipdate") in ("1997-05-18", "1996-05-06") or 
not(ref(name="l_shipmode") == "MAIL"))"""
+        def plan = """(ref(name="l_shipdate") in ("1997-05-18", "1996-05-06") 
or not(ref(name="l_shipmode") == "MAIL"))"""
         order_qt_q04 """${sqlstr}""" 
         explain {
             sql("""${sqlstr}""")
diff --git a/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy b/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy
index 7ed94f7ae15..542ab571cd3 100644
--- a/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy
+++ b/regression-test/suites/insert_p0/insert_group_commit_with_exception.groovy
@@ -195,7 +195,7 @@ suite("insert_group_commit_with_exception", "nonConcurrent") {
                     ps.setObject(2, "f");
                     ps.setObject(3, 90);
                     ps.addBatch();
-                    int[] result = ps.executeBatch();
+                    result = ps.executeBatch();
                     logger.info("prepare insert result: " + result)
                 }
 
@@ -204,7 +204,7 @@ suite("insert_group_commit_with_exception", "nonConcurrent") {
                     ps.setObject(1, 11);
                     ps.setObject(2, "f");
                     ps.addBatch();
-                    int[] result = ps.executeBatch();
+                    result = ps.executeBatch();
                     logger.info("prepare insert result: " + result)
                 }
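
These two hunks go the other way from the rest of the patch: `int[] result` becomes a bare assignment. The likely reason (hedged): once `result` is declared as a local earlier in the method, Groovy will not allow the name to be re-declared in a nested block, so inner blocks must assign to the existing local. Sketch:

    def result                   // declared once, up front
    if (true) {
        // int[] result = ...    // would not compile here: re-declares a name the outer scope owns
        result = [1, 1] as int[] // assign to the existing local, as the hunks now do
    }
    assert result.length == 2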
 
diff --git a/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
index 7b63521178e..6a4ef958f9c 100644
--- a/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
+++ b/regression-test/suites/inverted_index_p0/index_change/test_add_drop_index_on_table_with_mv.groovy
@@ -104,7 +104,7 @@ suite("test_add_drop_index_on_table_with_mv") {
 
     sql """ SHOW ALTER TABLE MATERIALIZED VIEW """
 
-    max_try_secs = 60
+    def max_try_secs = 60
     while (max_try_secs--) {
         String res = getJobState(tableName)
         if (res == "FINISHED" || res == "CANCELLED") {
diff --git a/regression-test/suites/inverted_index_p0/index_change/test_pk_uk_index_change.groovy b/regression-test/suites/inverted_index_p0/index_change/test_pk_uk_index_change.groovy
index 32bee13dc81..41e72b51b41 100644
--- a/regression-test/suites/inverted_index_p0/index_change/test_pk_uk_index_change.groovy
+++ b/regression-test/suites/inverted_index_p0/index_change/test_pk_uk_index_change.groovy
@@ -300,7 +300,7 @@ suite("test_pk_uk_index_change", "inverted_index") {
                         """  
         assertTrue(result0.size()==result1.size())
         for (int i = 0; i < result0.size(); ++i) {
-            for (j = 0; j < result0[0].size(); j++) {
+            for (int j = 0; j < result0[0].size(); j++) {
                 logger.info("result: " + result0[i][j] + "|" + result1[i][j])
                 assertTrue(result0[i][j]==result1[i][j])
             }
diff --git a/regression-test/suites/inverted_index_p0/storage_format/test_schema_change_storage_format.groovy b/regression-test/suites/inverted_index_p0/storage_format/test_schema_change_storage_format.groovy
index e28414ab48c..b50414035c1 100644
--- a/regression-test/suites/inverted_index_p0/storage_format/test_schema_change_storage_format.groovy
+++ b/regression-test/suites/inverted_index_p0/storage_format/test_schema_change_storage_format.groovy
@@ -105,7 +105,7 @@ suite("test_schema_change_storge_format", "p0") {
     def backendId_to_backendHttpPort = [:]
     getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
 
-    tablets = sql_return_maparray """ show tablets from ${table_name}; """
+    def tablets = sql_return_maparray """ show tablets from ${table_name}; """
     String tablet_id = tablets[0].TabletId
     String backend_id = tablets[0].BackendId
     String ip = backendId_to_backendIP.get(backend_id)
diff --git a/regression-test/suites/inverted_index_p0/test_array_contains_with_inverted_index.groovy b/regression-test/suites/inverted_index_p0/test_array_contains_with_inverted_index.groovy
index 7aacf6e45a3..83ff988e039 100644
--- a/regression-test/suites/inverted_index_p0/test_array_contains_with_inverted_index.groovy
+++ b/regression-test/suites/inverted_index_p0/test_array_contains_with_inverted_index.groovy
@@ -67,7 +67,7 @@ suite("test_array_contains_with_inverted_index"){
 
     qt_sql """ select count() from ${indexTblName}"""
     def param_contains = ["\'s\'", "\'\'", null]
-    for (i = 0 ; i < param_contains.size(); ++i) {
+    for (int i = 0 ; i < param_contains.size(); ++i) {
         def p = param_contains[i]
         log.info("param: ${p}")
         order_qt_sql """ select * from tai where array_contains(inventors, 
${p}) order by id; """
@@ -84,7 +84,7 @@ suite("test_array_contains_with_inverted_index"){
     // test arrays_overlap with inverted index
     // now if we use inverted index we will not eval exprs
     def param = [["\'s\'", "\'t\'"], [], null, ["\'s\'", "\'\'", "\'t\'"], 
["\'s\'", null, "\'t\'"], [null, "\'\'"], ["\'s\'", null, "\'t\'", "\'\'"]] // 
null for arrays_overlap will return null which in predicate will lead to return 
empty set
-    for (i = 0 ; i < param.size(); ++i) {
+    for (int i = 0 ; i < param.size(); ++i) {
         def p = param[i]
         log.info("param: ${p}")
         order_qt_sql """ select /*+SET_VAR(enable_common_expr_pushdown = 
true)*/ * from tai where arrays_overlap(inventors, ${p}) order by id; """
diff --git a/regression-test/suites/inverted_index_p0/test_index_match_select.groovy b/regression-test/suites/inverted_index_p0/test_index_match_select.groovy
index 82451cd09cb..753a5b0a610 100644
--- a/regression-test/suites/inverted_index_p0/test_index_match_select.groovy
+++ b/regression-test/suites/inverted_index_p0/test_index_match_select.groovy
@@ -205,7 +205,7 @@ suite("test_index_match_select", "inverted_index_select"){
         }
 
         // cas2.2 test varchar standard match same term with different way and repeate 5 times
-        for (test_times = 0; test_times < 5; test_times++) {
+        for (int test_times = 0; test_times < 5; test_times++) {
             qt_sql """ select * from ${indexTbName1} where ${varchar_colume3} match_any 'zhang yi' order by name """
             qt_sql """ select * from ${indexTbName1} where ${varchar_colume3} match_all "zhang yi" order by name """
             qt_sql """ select * from ${indexTbName1} where ${varchar_colume3} match_any '"zhang yi"' order by name """
@@ -215,7 +215,7 @@ suite("test_index_match_select", "inverted_index_select"){
         }
 
         // case3: test char standard match same term with different way and repeate 5 times
-        for (test_times = 0; test_times < 5; test_times++) {
+        for (int test_times = 0; test_times < 5; test_times++) {
             qt_sql """ select * from ${indexTbName1} where ${char_colume1} match_any 'tall:100cm, weight: 30kg, hobbies:' order by name """
             qt_sql """ select * from ${indexTbName1} where ${char_colume1} match_all "tall:100cm, weight: 30kg, hobbies:" order by name """
             qt_sql """ select * from ${indexTbName1} where ${char_colume1} match_any '"tall:100cm, weight: 30kg, hobbies:"' order by name """
@@ -225,7 +225,7 @@ suite("test_index_match_select", "inverted_index_select"){
         }
 
         // case4: test string simple match same term with different way and repeate 5 times
-        for (test_times = 0; test_times < 5; test_times++) {
+        for (int test_times = 0; test_times < 5; test_times++) {
             qt_sql """ select * from ${indexTbName1} where ${string_colume1} match_all 'A naughty boy' order by name """
             qt_sql """ select * from ${indexTbName1} where ${string_colume1} match_any "A naughty boy" order by name """
             qt_sql """ select * from ${indexTbName1} where ${string_colume1} match_any '"A naughty boy"' order by name """
@@ -234,7 +234,7 @@ suite("test_index_match_select", "inverted_index_select"){
         }
 
         // case5: test text standard match same term with different way and repeate 5 times
-        for (test_times = 0; test_times < 5; test_times++) {
+        for (int test_times = 0; test_times < 5; test_times++) {
             qt_sql """ select * from ${indexTbName1} where ${text_colume1} match_all 'i just want go outside' order by name """
             qt_sql """ select * from ${indexTbName1} where ${text_colume1} match_any "i just want go outside" order by name """
             qt_sql """ select * from ${indexTbName1} where ${text_colume1} match_all '"i just want go outside"' order by name """
diff --git a/regression-test/suites/inverted_index_p0/unique_with_mow/test_primary_key_simple_case.groovy b/regression-test/suites/inverted_index_p0/unique_with_mow/test_primary_key_simple_case.groovy
index 8884ff837a6..0c437e4fe1b 100644
--- a/regression-test/suites/inverted_index_p0/unique_with_mow/test_primary_key_simple_case.groovy
+++ b/regression-test/suites/inverted_index_p0/unique_with_mow/test_primary_key_simple_case.groovy
@@ -68,7 +68,7 @@ suite("test_primary_key_simple_case", "inverted_index") {
         """
     sql """ set enable_common_expr_pushdown = true """
 
-    result = sql """ SELECT * FROM ${tableName} t ORDER BY user_id; """
+    def result = sql """ SELECT * FROM ${tableName} t ORDER BY user_id; """
     assertTrue(result.size() == 5)
     assertTrue(result[0].size() == 11)
 
diff --git a/regression-test/suites/load_p0/http_stream/test_http_stream_2pc.groovy b/regression-test/suites/load_p0/http_stream/test_http_stream_2pc.groovy
index b05ce6c42b8..bc07d65ecce 100644
--- a/regression-test/suites/load_p0/http_stream/test_http_stream_2pc.groovy
+++ b/regression-test/suites/load_p0/http_stream/test_http_stream_2pc.groovy
@@ -65,16 +65,16 @@ suite("test_http_stream_2pc", "p0") {
         log.info("http_stream execute 2pc: ${command}")
 
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
-        json2pc = parseJson(out)
+        def code = process.waitFor()
+        def out = process.text
+        def json2pc = parseJson(out)
         log.info("http_stream 2pc result: ${out}".toString())
         assertEquals(code, 0)
         assertEquals("success", json2pc.status.toLowerCase())
 
         def count = 0
         while (true) {
-            res = sql "select count(*) from ${tableName1}"
+            def res = sql "select count(*) from ${tableName1}"
             if (res[0][0] > 0) {
                 break
             }
diff --git a/regression-test/suites/load_p0/insert/test_insert_random_distribution_table.groovy b/regression-test/suites/load_p0/insert/test_insert_random_distribution_table.groovy
index f9e456b5d3c..9510a4f7980 100644
--- a/regression-test/suites/load_p0/insert/test_insert_random_distribution_table.groovy
+++ b/regression-test/suites/load_p0/insert/test_insert_random_distribution_table.groovy
@@ -151,7 +151,7 @@ suite("test_insert_random_distribution_table", "p0") {
         res = sql "show tablets from ${tableName} partition ${partitions[p]}"
         partitionTablets[p] = getTablets.call(res)
         partitionRowCounts[p] = []
-        numTablets = partitionTablets[p].size()
+        def numTablets = partitionTablets[p].size()
         for (int i = numTablets - 1; i >= 0; i--) {
             def countResult = sql "select count() from ${tableName} 
tablet(${partitionTablets[p][i]})"
             partitionRowCounts[p][i] = countResult[0][0]
@@ -191,7 +191,7 @@ suite("test_insert_random_distribution_table", "p0") {
 
 
     for (int p = 0; p < 3; p++) {
-        numTablets = partitionTablets[p].size()
+        def numTablets = partitionTablets[p].size()
         for (int i = numTablets - 1; i >= 0; i--) {
             def countResult = sql "select count() from ${tableName} 
tablet(${partitionTablets[p][i]})"
             partitionRowCounts[p][i] = countResult[0][0]
diff --git a/regression-test/suites/load_p0/mysql_load/test_mysql_load.groovy b/regression-test/suites/load_p0/mysql_load/test_mysql_load.groovy
index ff239e5fef1..17cc30e578a 100644
--- a/regression-test/suites/load_p0/mysql_load/test_mysql_load.groovy
+++ b/regression-test/suites/load_p0/mysql_load/test_mysql_load.groovy
@@ -114,7 +114,7 @@ suite("test_mysql_load", "p0") {
     """
 
     sql "sync"
-    rowCount = sql "select count(1) from ${tableName}"
+    def rowCount = sql "select count(1) from ${tableName}"
     assertEquals(3, rowCount[0][0])
 
 
diff --git a/regression-test/suites/load_p0/stream_load/test_group_commit_stream_load_with_nonexist_db_and_table.groovy b/regression-test/suites/load_p0/stream_load/test_group_commit_stream_load_with_nonexist_db_and_table.groovy
index 57054b2a420..8fd63814715 100644
--- a/regression-test/suites/load_p0/stream_load/test_group_commit_stream_load_with_nonexist_db_and_table.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_group_commit_stream_load_with_nonexist_db_and_table.groovy
@@ -28,13 +28,13 @@ suite("test_group_commit_stream_load_with_nonexist_db_and_table") {
         log.info("stream load command: ${command}")
 
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
+        def code = process.waitFor()
+        def out = process.text
         log.info("stream lad result: ${out}".toString())
         assertTrue(out.toString().contains("OlapTable not found"))
     } catch (Exception e) {
         logger.info("failed: " + e.getMessage())
-        assertTrue(false)
+        throw e
     } finally {
 
     }
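
The catch-block change above also improves failure reporting: assertTrue(false) discarded the original exception and reported a bare assertion failure, while rethrowing keeps the real cause and stack trace in the test log. A sketch (the exception is a stand-in, and the snippet terminates by rethrowing, by design):

    try {
        throw new IllegalStateException('OlapTable not found')  // stand-in for the real failure
    } catch (Exception e) {
        println 'failed: ' + e.getMessage()
        throw e   // rethrow: the original cause stays visible, unlike assertTrue(false)
    }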
diff --git a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
index 4a80004169b..317a0363230 100644
--- a/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_map_load_and_compaction.groovy
@@ -117,6 +117,8 @@ suite("test_map_load_and_compaction", "p0") {
         def compactJson = parseJson(out.trim())
         assertEquals("success", compactJson.status.toLowerCase())
 
+        def running = false
+
         // wait compactions done
         do {
             Thread.sleep(1000)
@@ -131,7 +133,7 @@ suite("test_map_load_and_compaction", "p0") {
         checkCompactionStatus.call(compactionStatus, 1)
 
         // finally check backend alive
-        backends = sql """ show backends; """
+        def backends = sql """ show backends; """
         assertTrue(backends.size() > 0)
         for (String[] b : backends) {
             assertEquals("true", b[9])
diff --git a/regression-test/suites/load_p0/stream_load/test_stream_load.groovy b/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
index 521c4e233ea..27b40471a69 100644
--- a/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
+++ b/regression-test/suites/load_p0/stream_load/test_stream_load.groovy
@@ -178,7 +178,7 @@ suite("test_stream_load", "p0") {
     }
 
     sql "sync"
-    rowCount = sql "select count(1) from ${tableName}"
+    def rowCount = sql "select count(1) from ${tableName}"
     assertEquals(3, rowCount[0][0])
 
     // test load_nullable_to_not_nullable
@@ -1335,9 +1335,9 @@ suite("test_stream_load", "p0") {
         def command = "curl --location-trusted -u 
${context.config.feHttpUser}:${context.config.feHttpPassword} -H 
column_separator:| -H Transfer-Encoding:chunked -H columns:k1,k2,v1,v2,v3  -T 
${context.dataPath}/test_chunked_transfer.csv 
http://${context.config.feHttpAddress}/api/${db}/${tableName16}/_stream_load";
         log.info("test chunked transfer command: ${command}")
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
-        json2pc = parseJson(out)
+        def code = process.waitFor()
+        def out = process.text
+        def json2pc = parseJson(out)
         log.info("test chunked transfer result: ${out}".toString())
         sql "sync"
         qt_sql_chunked_transfer_csv "select * from ${tableName16} order by k1"
@@ -1363,9 +1363,9 @@ suite("test_stream_load", "p0") {
         def command = "curl --location-trusted -u 
${context.config.feHttpUser}:${context.config.feHttpPassword} -H 
Transfer-Encoding:chunked -H format:json -H read_json_by_line:true -T 
${context.dataPath}/test_chunked_transfer.json 
http://${context.config.feHttpAddress}/api/${db}/${tableName16}/_stream_load";
         log.info("test chunked transfer command: ${command}")
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
-        json2pc = parseJson(out)
+        def code = process.waitFor()
+        def out = process.text
+        def json2pc = parseJson(out)
         log.info("test chunked transfer result: ${out}".toString())
         sql "sync"
         qt_sql_chunked_transfer_json "select * from ${tableName16} order by k1"
@@ -1643,8 +1643,8 @@ suite("test_stream_load", "p0") {
        def command = "curl --location-trusted -u 
${context.config.feHttpUser}:${context.config.feHttpPassword} -H 
column_separator:| -H ${db}:${tableName16} -H Content-Length:0  -H 
Transfer-Encoding:chunked -H columns:k1,k2,v1,v2,v3 -T 
${context.dataPath}/test_chunked_transfer.csv 
http://${beHost}:${beHttpPort}/api/${db}/${tableName16}/_stream_load";
        log.info("test chunked transfer command: ${command}")
        def process = command.execute()
-       code = process.waitFor()
-       out = process.text
+       def code = process.waitFor()
+       def out = process.text
        log.info("test chunked transfer result: ${out}".toString())
        def json = parseJson(out)
        assertEquals("fail", json.Status.toLowerCase())
@@ -1673,8 +1673,8 @@ suite("test_stream_load", "p0") {
         def command = "curl --location-trusted -u 
${context.config.feHttpUser}:${context.config.feHttpPassword} -H 
column_separator:| -H ${db}:${tableName16} -H Content-Length:  -H 
Transfer-Encoding: -T ${context.dataPath}/test_chunked_transfer.csv 
http://${beHost}:${beHttpPort}/api/${db}/${tableName16}/_stream_load";
         log.info("test chunked transfer command: ${command}")
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
+        def code = process.waitFor()
+        def out = process.text
         log.info("test chunked transfer result: ${out}".toString())
         def json = parseJson(out)
         assertEquals("fail", json.Status.toLowerCase())
diff --git 
a/regression-test/suites/load_p0/stream_load/test_stream_load_move_memtable.groovy
 
b/regression-test/suites/load_p0/stream_load/test_stream_load_move_memtable.groovy
index 8a2f7b0247d..f1fcf9cf87a 100644
--- 
a/regression-test/suites/load_p0/stream_load/test_stream_load_move_memtable.groovy
+++ 
b/regression-test/suites/load_p0/stream_load/test_stream_load_move_memtable.groovy
@@ -134,7 +134,7 @@ suite("test_stream_load_move_memtable", "p0") {
     }
 
     sql "sync"
-    rowCount = sql "select count(1) from ${tableName}"
+    def rowCount = sql "select count(1) from ${tableName}"
     assertEquals(3, rowCount[0][0])
 
     // test load_nullable_to_not_nullable
diff --git a/regression-test/suites/manager/test_manager_interface_2.groovy 
b/regression-test/suites/manager/test_manager_interface_2.groovy
index 611f825c142..2ee8fd30b3a 100644
--- a/regression-test/suites/manager/test_manager_interface_2.groovy
+++ b/regression-test/suites/manager/test_manager_interface_2.groovy
@@ -44,7 +44,7 @@ suite('test_manager_interface_2',"p0") {
         );"""
 
     
-        result =sql  """ show data """ 
+        def result =sql  """ show data """ 
         for(int i = 0 ; i < result.size();i++) {
             assert(result[i][0].toLowerCase() != "null") //TableName 
             assert(result[i][1].toLowerCase() != "null") //Size 
@@ -183,7 +183,7 @@ suite('test_manager_interface_2',"p0") {
     
         sql """ALTER SYSTEM ADD BACKEND "${address}:${notExistPort}";"""
 
-        result = sql """SHOW BACKENDS;"""
+        def result = sql """SHOW BACKENDS;"""
         logger.info("result = ${result}" )
         def x = 0 
         for(int i  =0 ;i<result.size();i++) {
diff --git a/regression-test/suites/manager/test_manager_interface_4.groovy 
b/regression-test/suites/manager/test_manager_interface_4.groovy
index 355d96e1e03..78d8d451b7b 100644
--- a/regression-test/suites/manager/test_manager_interface_4.groovy
+++ b/regression-test/suites/manager/test_manager_interface_4.groovy
@@ -122,7 +122,7 @@ suite('test_manager_interface_4',"p0") {
         def healthy_num = 0;
         def total_tablet_num  = 0;
         def total_healthy_num = 0;
-        result = sql """  SHOW PROC '/cluster_health/tablet_health' """ 
+        def result = sql """  SHOW PROC '/cluster_health/tablet_health' """ 
         logger.info("result = ${result}" )
 
         for( int i =0 ;i < result.size();i++ ){
diff --git a/regression-test/suites/mtmv_p0/test_build_mtmv.groovy 
b/regression-test/suites/mtmv_p0/test_build_mtmv.groovy
index 717f614e3e5..b1456da8c43 100644
--- a/regression-test/suites/mtmv_p0/test_build_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_build_mtmv.groovy
@@ -202,7 +202,7 @@ suite("test_build_mtmv") {
         AS
         SELECT ${tableName}.username, ${tableNamePv}.pv FROM ${tableName}, 
${tableNamePv} WHERE ${tableName}.id=${tableNamePv}.id;
     """
-    jobName = getJobName("regression_test_mtmv_p0", mvName);
+    def jobName = getJobName("regression_test_mtmv_p0", mvName);
     println jobName
     waitingMTMVTaskFinished(jobName)
     order_qt_select "SELECT * FROM ${mvName}"
diff --git a/regression-test/suites/mtmv_p0/test_create_mv_mtmv.groovy 
b/regression-test/suites/mtmv_p0/test_create_mv_mtmv.groovy
index c6b6b4386d5..189517ba8ea 100644
--- a/regression-test/suites/mtmv_p0/test_create_mv_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_create_mv_mtmv.groovy
@@ -50,7 +50,7 @@ suite("test_create_mv_mtmv","mtmv") {
         CREATE MATERIALIZED VIEW mv_mtmv1  as select k2 from ${mvName};
         """
 
-    max_try_secs = 60
+    def max_try_secs = 60
     while (max_try_secs--) {
         def jobStateResult = sql """  SHOW ALTER TABLE MATERIALIZED VIEW WHERE 
TableName='${mvName}' ORDER BY CreateTime DESC LIMIT 1; """
         String res = jobStateResult[0][8]
diff --git a/regression-test/suites/mtmv_p0/test_create_rollup_mtmv.groovy 
b/regression-test/suites/mtmv_p0/test_create_rollup_mtmv.groovy
index df4040d9a9e..4fd926fa3ac 100644
--- a/regression-test/suites/mtmv_p0/test_create_rollup_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_create_rollup_mtmv.groovy
@@ -51,7 +51,7 @@ suite("test_create_rollup_mtmv","mtmv") {
         alter table ${mvName} ADD ROLLUP rollup1(k3);
         """
 
-    max_try_secs = 60
+    def max_try_secs = 60
     while (max_try_secs--) {
         def jobStateResult = sql """SHOW ALTER TABLE ROLLUP WHERE 
TableName='${mvName}' ORDER BY CreateTime DESC LIMIT 1; """
         String res = jobStateResult[0][8]
diff --git a/regression-test/suites/mtmv_p0/test_limit_partition_mtmv.groovy 
b/regression-test/suites/mtmv_p0/test_limit_partition_mtmv.groovy
index 19aa229743d..6e442631303 100644
--- a/regression-test/suites/mtmv_p0/test_limit_partition_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_limit_partition_mtmv.groovy
@@ -57,7 +57,7 @@ suite("test_limit_partition_mtmv") {
             AS
             SELECT * FROM ${tableName};
     """
-    showPartitionsResult = sql """show partitions from ${mvName}"""
+    def showPartitionsResult = sql """show partitions from ${mvName}"""
     logger.info("showPartitionsResult: " + showPartitionsResult.toString())
     assertEquals(1, showPartitionsResult.size())
     assertTrue(showPartitionsResult.toString().contains("p_20380101"))
diff --git a/regression-test/suites/mtmv_p0/test_rollup_partition_mtmv.groovy 
b/regression-test/suites/mtmv_p0/test_rollup_partition_mtmv.groovy
index a9eeacc1c89..6ab3fa57c47 100644
--- a/regression-test/suites/mtmv_p0/test_rollup_partition_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_rollup_partition_mtmv.groovy
@@ -96,7 +96,7 @@ suite("test_rollup_partition_mtmv") {
             AS
             SELECT * FROM ${tableName};
     """
-    showPartitionsResult = sql """show partitions from ${mvName}"""
+    def showPartitionsResult = sql """show partitions from ${mvName}"""
     logger.info("showPartitionsResult: " + showPartitionsResult.toString())
     assertEquals(2, showPartitionsResult.size())
     assertTrue(showPartitionsResult.toString().contains("2020-01-01"))
@@ -199,7 +199,7 @@ suite("test_rollup_partition_mtmv") {
     sql """
             REFRESH MATERIALIZED VIEW ${mvName} AUTO
         """
-    jobName = getJobName(dbName, mvName);
+    def jobName = getJobName(dbName, mvName);
     log.info(jobName)
     waitingMTMVTaskFinished(jobName)
     order_qt_date_range_month "SELECT * FROM ${mvName} order by k1,k2"
diff --git a/regression-test/suites/mv_p0/test_create_mv/test_create_mv.groovy 
b/regression-test/suites/mv_p0/test_create_mv/test_create_mv.groovy
index 8ee24cb5ba2..49817cee278 100644
--- a/regression-test/suites/mv_p0/test_create_mv/test_create_mv.groovy
+++ b/regression-test/suites/mv_p0/test_create_mv/test_create_mv.groovy
@@ -67,7 +67,7 @@ suite("test_create_mv") {
 
     sql """ SHOW ALTER TABLE MATERIALIZED VIEW """
 
-    max_try_secs = 60
+    def max_try_secs = 60
     while (max_try_secs--) {
         String res = getJobState(tableName)
         if (res == "FINISHED" || res == "CANCELLED") {
diff --git a/regression-test/suites/nereids_arith_p0/topn/accept_null.groovy 
b/regression-test/suites/nereids_arith_p0/topn/accept_null.groovy
index 09713c76172..b12aef3a656 100644
--- a/regression-test/suites/nereids_arith_p0/topn/accept_null.groovy
+++ b/regression-test/suites/nereids_arith_p0/topn/accept_null.groovy
@@ -66,7 +66,7 @@ suite ("accept_null") {
    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
    boolean disableAutoCompaction = true
    for(int i=0;i<backendId_to_backendIP.keySet().size();i++){
-      backend_id = backendId_to_backendIP.keySet()[i]
+      def backend_id = backendId_to_backendIP.keySet()[i]
       def (code, out, err) = 
show_be_config(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id))
       logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
       assertEquals(code, 0)
@@ -86,7 +86,7 @@ suite ("accept_null") {
       def tablet_info = sql_return_maparray """ show tablet ${tablet_id}; """
       logger.info("tablet"+tablet_info)
       def table_id = tablet_info[0].TableId
-      backend_id = tablet.BackendId
+      def backend_id = tablet.BackendId
       def times = 1
       def code, out, err
       do{
diff --git 
a/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy 
b/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy
index d1b1e964126..658c8efb04f 100644
--- a/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy
+++ b/regression-test/suites/nereids_p0/cache/parse_sql_from_sql_cache.groovy
@@ -145,7 +145,7 @@ suite("parse_sql_from_sql_cache") {
             assertNoCache "select * from test_use_plan_cache5"
         }),
         extraThread("testUpdate",{
-            createTestTable "test_use_plan_cache6", uniqueTable=true
+            createTestTable("test_use_plan_cache6", true)
 
             // after partition changed 10s, the sql cache can be used
             sleep(10000)
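
The createTestTable fix just above is a related Groovy gotcha: Groovy has
no name=value syntax for positional parameters, so `uniqueTable=true`
inside an argument list is an assignment expression. It silently creates
an undeclared `uniqueTable` binding variable and passes its value, true,
as the second positional argument. The same trap is removed from the
connect(...) call in test_show_data.groovy further down. A small sketch
with a hypothetical two-argument function f:

    def f(String name, boolean unique) { name + ':' + unique }

    assert f('t', uniqueTable = true) == 't:true' // "named arg" is an assignment
    assert binding.hasVariable('uniqueTable')     // ...that leaked a global
    assert f('t', true) == 't:true'               // the explicit form in the fix
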
diff --git 
a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_fe.groovy
 
b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_fe.groovy
index d590bacfa07..ad137d49c3d 100644
--- 
a/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_fe.groovy
+++ 
b/regression-test/suites/nereids_p0/expression/fold_constant/fold_constant_by_fe.groovy
@@ -66,7 +66,7 @@ suite("test_fold_constant_by_fe") {
         qt_sql "select to_monday('${date}'), last_day('${date}'), 
to_date('${date}'), to_days('${date}'), date('${date}'), datev2('${date}')"
     }
 
-    test_year = [2001, 2013, 123, 1969, 2023]
+    def test_year = [2001, 2013, 123, 1969, 2023]
     for (year in test_year) {
         for (integer in test_int) {
             qt_sql "select /*+SET_VAR(time_zone=\"Asia/Shanghai\")*/ 
makedate(${year}, ${integer}), from_days(${year * integer}), 
from_unixtime(${year / 10 * year * integer})"
diff --git a/regression-test/suites/nereids_p0/outfile/test_outfile.groovy 
b/regression-test/suites/nereids_p0/outfile/test_outfile.groovy
index f256df91809..6a7be107510 100644
--- a/regression-test/suites/nereids_p0/outfile/test_outfile.groovy
+++ b/regression-test/suites/nereids_p0/outfile/test_outfile.groovy
@@ -109,7 +109,7 @@ suite("test_outfile") {
             SELECT * FROM ${tableName} t ORDER BY user_id INTO OUTFILE 
"file://${outFile}/";
         """
 
-        url = result[0][3]
+        def url = result[0][3]
         urlHost = url.substring(8, url.indexOf("${outFile}"))
         def filePrifix = url.split("${outFile}")[1]
         csvFiles = "${outFile}${filePrifix}*.csv"
@@ -146,7 +146,7 @@ suite("test_outfile") {
             path.delete();
         }
 
-        cmd = "rm -rf ${csvFiles}"
+        def cmd = "rm -rf ${csvFiles}"
         sshExec ("root", urlHost, cmd)
     }
 
@@ -184,7 +184,7 @@ suite("test_outfile") {
             SELECT * FROM ${tableName} t ORDER BY k1, v2 INTO OUTFILE 
"file://${outFile}/"
         """
 
-        url = result[0][3]
+        def url = result[0][3]
         urlHost = url.substring(8, url.indexOf("${outFile}"))
         def filePrifix = url.split("${outFile}")[1]
         csvFiles = "${outFile}${filePrifix}*.csv"
@@ -209,7 +209,7 @@ suite("test_outfile") {
             path.delete();
         }
 
-        cmd = "rm -rf ${csvFiles}"
+        def cmd = "rm -rf ${csvFiles}"
         sshExec ("root", urlHost, cmd)
     }
 
@@ -239,6 +239,8 @@ suite("test_outfile") {
 
         sql "set enable_parallel_outfile = true;"
         sql """select * from select_into_file into outfile 
"file://${outFilePath}/" properties("success_file_name" = "SUCCESS");"""
+    } catch (Exception e) {
+        logger.info("export exception: ${e}")
     } finally {
         try_sql("DROP TABLE IF EXISTS select_into_file")
         File path = new File(outFilePath)
@@ -249,7 +251,7 @@ suite("test_outfile") {
             path.delete();
         }
 
-        cmd = "rm -rf ${csvFiles}"
+        def cmd = "rm -rf ${csvFiles}"
         sshExec ("root", urlHost, cmd)
     }
 }
diff --git a/regression-test/suites/nereids_p0/outfile/test_outfile_expr.groovy 
b/regression-test/suites/nereids_p0/outfile/test_outfile_expr.groovy
index 7ac4b1b43b2..9429906ec3c 100644
--- a/regression-test/suites/nereids_p0/outfile/test_outfile_expr.groovy
+++ b/regression-test/suites/nereids_p0/outfile/test_outfile_expr.groovy
@@ -142,7 +142,7 @@ suite("test_outfile_expr") {
             path.delete();
         }
 
-        cmd = "rm -rf ${csvFiles}"
+        def cmd = "rm -rf ${csvFiles}"
         sshExec ("root", urlHost, cmd)
     }
 
diff --git 
a/regression-test/suites/nereids_p0/outfile/test_outfile_parquet.groovy 
b/regression-test/suites/nereids_p0/outfile/test_outfile_parquet.groovy
index e6cdb541dbf..cdc210b8aea 100644
--- a/regression-test/suites/nereids_p0/outfile/test_outfile_parquet.groovy
+++ b/regression-test/suites/nereids_p0/outfile/test_outfile_parquet.groovy
@@ -113,7 +113,7 @@ suite("test_outfile_parquet") {
             SELECT * FROM ${tableName} t ORDER BY user_id INTO OUTFILE 
"file://${outFile}/" FORMAT AS PARQUET;
         """
 
-        url = result[0][3]
+        def url = result[0][3]
         urlHost = url.substring(8, url.indexOf("${outFile}"))
         def filePrifix = url.split("${outFile}")[1]
         parquetFiles = "${outFile}${filePrifix}*.parquet"
@@ -158,7 +158,9 @@ suite("test_outfile_parquet") {
         logger.info("Run command: command=" + command + ",code=" + code + ", 
out=" + out + ", err=" + err)
         assertEquals(code, 0)
         qt_select_default """ SELECT * FROM ${tableName2} t ORDER BY user_id; 
"""
-    } finally {
+    } catch (Exception e) {
+        logger.info("export exception: ${e}")
+    } finally {
         try_sql("DROP TABLE IF EXISTS ${tableName}")
         try_sql("DROP TABLE IF EXISTS ${tableName2}")
         File path = new File(outFilePath)
@@ -169,7 +171,7 @@ suite("test_outfile_parquet") {
             path.delete();
         }
 
-        cmd = "rm -rf ${parquetFiles}"
+        def cmd = "rm -rf ${parquetFiles}"
         sshExec ("root", urlHost, cmd)
     }
 }
diff --git 
a/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_or_datetime_computation_negative.groovy
 
b/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_or_datetime_computation_negative.groovy
index 1a5d3ca7a0a..282a28a903e 100644
--- 
a/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_or_datetime_computation_negative.groovy
+++ 
b/regression-test/suites/nereids_p0/sql_functions/datetime_functions/test_date_or_datetime_computation_negative.groovy
@@ -14,7 +14,6 @@
 // KIND, either express or implied.  See the License for the
 // specific language governing permissions and limitations
 // under the License.
-test_date_or_datetime_computation_negative
 suite("test_date_or_datetime_computation_negative") {
     sql """ CREATE TABLE IF NOT EXISTS 
test_date_or_datetime_computation_negative (
                 `row_id` LARGEINT NOT NULL,
diff --git 
a/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy
 
b/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy
index 776f1b996f1..178307f15f8 100644
--- 
a/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy
+++ 
b/regression-test/suites/nereids_rules_p0/mv/dimension/dimension_self_conn.groovy
@@ -305,7 +305,7 @@ suite("partition_mv_rewrite_dimension_self_conn") {
 
     // agg
     // agg + without group by + with agg function
-    agg_mv_stmt = """
+    def agg_mv_stmt = """
         select t2.o_orderkey, 
         sum(t1.O_TOTALPRICE) as sum_total,
         max(t1.o_totalprice) as max_total, 
diff --git a/regression-test/suites/nereids_syntax_p0/group_bit.groovy 
b/regression-test/suites/nereids_syntax_p0/group_bit.groovy
index b0a96f80ddf..2244a5af4e0 100644
--- a/regression-test/suites/nereids_syntax_p0/group_bit.groovy
+++ b/regression-test/suites/nereids_syntax_p0/group_bit.groovy
@@ -18,7 +18,7 @@
 suite("group_bit") {
     sql "SET enable_nereids_planner=true"
     sql "SET enable_fallback_to_original_planner=false"
-    table = "group_bit_and_or_xor"
+    def table = "group_bit_and_or_xor"
     sql """ CREATE TABLE if not exists ${table} (
         `k` int(11) NULL
         ) ENGINE=OLAP
diff --git a/regression-test/suites/nereids_syntax_p0/rollup/bitmap.groovy 
b/regression-test/suites/nereids_syntax_p0/rollup/bitmap.groovy
index 2407c70d8a4..77344c4fc86 100644
--- a/regression-test/suites/nereids_syntax_p0/rollup/bitmap.groovy
+++ b/regression-test/suites/nereids_syntax_p0/rollup/bitmap.groovy
@@ -37,7 +37,7 @@ suite("bitmap", "rollup") {
     sql """alter table test_materialized_view_bitmap1 modify column k1 set 
stats ('row_count'='2');"""
 
     sql "CREATE MATERIALIZED VIEW test_neg as select 
k1,bitmap_union(to_bitmap(k2)), bitmap_union(to_bitmap(k3)) FROM ${tbName1} 
GROUP BY k1;"
-    max_try_secs = 60
+    def max_try_secs = 60
     while (max_try_secs--) {
         String res = getJobState(tbName1)
         if (res == "FINISHED" || res == "CANCELLED") {
diff --git 
a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_load.groovy
 
b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_load.groovy
index d728ba9a145..0e0440dc89e 100644
--- 
a/regression-test/suites/partition_p0/auto_partition/test_auto_partition_load.groovy
+++ 
b/regression-test/suites/partition_p0/auto_partition/test_auto_partition_load.groovy
@@ -45,7 +45,7 @@ suite("test_auto_partition_load") {
     sql """ insert into load_table1 values (14, '2002-12-12 12:12:12.123', 
'2001-11-14 12:12:12.123456') """
 
     qt_select1 "select * from load_table1 order by k1"
-    result1 = sql "show partitions from load_table1"
+    def result1 = sql "show partitions from load_table1"
     logger.info("${result1}")
     assertEquals(result1.size(), 8)
 
@@ -79,7 +79,7 @@ suite("test_auto_partition_load") {
     sql """ insert into load_table2 values (14, '12', '2123-11-14 
12:12:12.123456') """
 
     qt_select2 "select * from load_table2 order by k1"
-    result2 = sql "show partitions from load_table2"
+    def result2 = sql "show partitions from load_table2"
     logger.info("${result2}")
     assertEquals(result2.size(), 11)
 }
diff --git 
a/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
 
b/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
index c08cb74e6c9..f0c9fbb4fcd 100644
--- 
a/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
+++ 
b/regression-test/suites/partition_p0/auto_partition/test_auto_range_partition.groovy
@@ -100,7 +100,7 @@ suite("test_auto_range_partition") {
     sql " insert into right_bound values ('9999-12-31 23:59:59'); "
     sql " insert into right_bound values ('9999-12-31 23:59:59.999999'); "
     qt_right_bound " select * from right_bound order by k0; "
-    result2 = sql "show partitions from right_bound"
+    def result2 = sql "show partitions from right_bound"
     logger.info("${result2}")
     assertEquals(result2.size(), 2)
 
@@ -119,7 +119,7 @@ suite("test_auto_range_partition") {
         );
     """
     sql " insert into week_range values (20240408), (20240409); "
-    def result2 = sql "show partitions from week_range"
+    result2 = sql "show partitions from week_range"
     logger.info("${result2}")
     assertEquals(result2.size(), 1)
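
The swap above is the flip side of the def rule: Groovy rejects declaring
the same local twice in one scope, so the first `show partitions` result
keeps `def result2` and the second occurrence becomes a plain
reassignment. In sketch form:

    def result2 = ['p1']     // first use: declaration
    // def result2 = ['p2']  // would not compile: already declared in scope
    result2 = ['p1', 'p2']   // later uses: plain reassignment
    assert result2.size() == 2
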
 
diff --git 
a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy
 
b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy
index a2c1e3035a6..1079ca04f81 100644
--- 
a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy
+++ 
b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_mod_distribution_key.groovy
@@ -53,7 +53,7 @@ suite("test_dynamic_partition_mod_distribution_key") {
 
         sql """ ADMIN SET FRONTEND CONFIG 
('dynamic_partition_check_interval_seconds' = '1') """
         sql """ alter table ${tableName} set('dynamic_partition.end'='5') """
-        result = sql "show partitions from ${tableName}"
+        def result = sql "show partitions from ${tableName}"
         for (def retry = 0; retry < 10; retry++) { // at most wait 120s
             if (result.size() == 9) {
                 break;
diff --git 
a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_alter.groovy
 
b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_alter.groovy
index d488a53812f..4a5a6ae962e 100644
--- 
a/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_alter.groovy
+++ 
b/regression-test/suites/partition_p0/dynamic_partition/test_dynamic_partition_with_alter.groovy
@@ -35,7 +35,7 @@ suite("test_dynamic_partition_with_alter") {
             "dynamic_partition.replication_allocation" = 
"tag.location.default: 1")
         """
 
-    result = sql "show partitions from ${tbl}"
+    def result = sql "show partitions from ${tbl}"
     assertEquals(7, result.size())
 
     try {
diff --git a/regression-test/suites/query_p0/sort/topn_2pr_rule.groovy 
b/regression-test/suites/query_p0/sort/topn_2pr_rule.groovy
index f5e20a97dd7..4b74cdad853 100644
--- a/regression-test/suites/query_p0/sort/topn_2pr_rule.groovy
+++ b/regression-test/suites/query_p0/sort/topn_2pr_rule.groovy
@@ -21,7 +21,7 @@ suite("topn_2pr_rule") {
 
     def create_table = { table_name, key_type="DUPLICATE" ->
         sql "DROP TABLE IF EXISTS ${table_name}"
-        value_type = "v string"
+        def value_type = "v string"
         if ("${key_type}" == "AGGREGATE") {
             value_type = "v string REPLACE_IF_NOT_NULL NULL" 
         }
diff --git a/regression-test/suites/query_profile/test_profile.groovy 
b/regression-test/suites/query_profile/test_profile.groovy
index 1b655558267..8a9df358275 100644
--- a/regression-test/suites/query_profile/test_profile.groovy
+++ b/regression-test/suites/query_profile/test_profile.groovy
@@ -35,7 +35,7 @@ def SUCCESS_MSG = 'success'
 def SUCCESS_CODE = 0
 def QUERY_NUM = 5
 
-random = new Random()
+def random = new Random()
 
 def getRandomNumber(int num){
     return random.nextInt(num)
diff --git 
a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
index 93e9443accd..68ee51c86ed 100644
--- a/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_keys_schema_change.groovy
@@ -174,9 +174,9 @@ suite ("test_agg_keys_schema_change") {
         String[][] tablets = sql """ show tablets from ${tableName}; """
         for (String[] tablet in tablets) {
                 String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                def backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
         }
 
@@ -186,8 +186,8 @@ suite ("test_agg_keys_schema_change") {
                 do {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
-                    backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def backend_id = tablet[2]
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
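
The schema_change suites here and below all get the same two-line
treatment for Groovy's multiple assignment: `(code, out, err) = ...` with
no def writes three binding variables, while `def (code, out, err) = ...`
declares three locals at once. When a loop needs to reassign, declaring
the names once up front (as accept_null.groovy does with
`def code, out, err`) keeps the later bare tuple assignments legal and
local. A sketch, assuming a hypothetical helper runStep() standing in for
the be_run_cumulative_compaction-style calls:

    def runStep() { [0, 'ok', ''] }       // hypothetical: returns [code, out, err]

    def (code, out, err) = runStep()      // three locals, declared and bound at once
    assert code == 0 && out == 'ok'

    def rc, stdout, stderr                // declare once...
    for (i in 0..<2) {
        (rc, stdout, stderr) = runStep()  // ...then reassign freely per iteration
    }
    assert rc == 0 && stderr == ''
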
diff --git 
a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
index f7af7226982..4ed3e39cdbb 100644
--- a/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_mv_schema_change.groovy
@@ -149,9 +149,9 @@ suite ("test_agg_mv_schema_change") {
         String[][] tablets = sql """ show tablets from ${tableName}; """
         for (String[] tablet in tablets) {
                 String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                def backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -162,8 +162,8 @@ suite ("test_agg_mv_schema_change") {
                 do {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
-                    backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def backend_id = tablet[2]
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
index 78578c7b522..a24268eea26 100644
--- 
a/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_agg_rollup_schema_change.groovy
@@ -109,7 +109,7 @@ suite ("test_agg_rollup_schema_change") {
             ALTER TABLE ${tableName} DROP COLUMN cost
             """
 
-        max_try_time = 3000
+        def max_try_time = 3000
         while (max_try_time--){
             String result = getJobState(tableName)
             if (result == "FINISHED") {
@@ -157,9 +157,9 @@ suite ("test_agg_rollup_schema_change") {
         String[][] tablets = sql """ show tablets from ${tableName}; """
         for (String[] tablet in tablets) {
                 String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                def backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -170,8 +170,8 @@ suite ("test_agg_rollup_schema_change") {
                 do {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
-                    backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def backend_id = tablet[2]
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_agg_schema_key_change_modify1.groovy
 
b/regression-test/suites/schema_change_p0/test_agg_schema_key_change_modify1.groovy
index 78d203c4f73..f6d1fdb4222 100644
--- 
a/regression-test/suites/schema_change_p0/test_agg_schema_key_change_modify1.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_agg_schema_key_change_modify1.groovy
@@ -84,7 +84,7 @@ suite("test_agg_schema_key_change_modify1","p0") {
              "               (789012345, 'Grace', 2123483141, 'Xian', 29, 0, 
13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00');"
 
      //TODO Test the agg model by modify a key type from LARGEINT  to BOOLEAN
-     errorMessage = "errCode = 2, detailMessage = Can not change LARGEINT to 
BOOLEAN"
+     def errorMessage = "errCode = 2, detailMessage = Can not change LARGEINT 
to BOOLEAN"
      expectException({
           sql initTable
           sql initTableData
@@ -219,7 +219,7 @@ suite("test_agg_schema_key_change_modify1","p0") {
      }, insertSql, false, "${tbName1}")
 
      sql """ DROP TABLE IF EXISTS ${tbName2} """
-     initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" +
+     def initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" +
              "          (\n" +
              "              `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" +
              "              `username` VARCHAR(50) NOT NULL COMMENT 
\"用户昵称\",\n" +
@@ -237,7 +237,7 @@ suite("test_agg_schema_key_change_modify1","p0") {
              "          \"replication_allocation\" = \"tag.location.default: 
1\"\n" +
              "          );"
 
-     initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 
2147483641, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', 
'2022-01-01 10:00:00')," +
+     def initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 
2147483641, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', 
'2022-01-01 10:00:00')," +
              "               (234567890, 'Bob', 214748364, 'Shanghai', 30, 1, 
13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00')," +
              "               (345678901, 'Carol', 2147483441, 'Guangzhou', 28, 
0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00')," +
              "               (456789012, 'Dave', 2147483141, 'Shenzhen', 35, 
1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00')," +
diff --git 
a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
index 477ea202e4c..211bdaefe3c 100644
--- a/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_agg_vals_schema_change.groovy
@@ -139,7 +139,7 @@ suite ("test_agg_vals_schema_change") {
         String[][] tablets = sql """ show tablets from ${tableName}; """
         for (String[] tablet in tablets) {
                 String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                def backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
                 def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
@@ -152,7 +152,7 @@ suite ("test_agg_vals_schema_change") {
                 do {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
-                    backend_id = tablet[2]
+                    def backend_id = tablet[2]
                     def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
diff --git 
a/regression-test/suites/schema_change_p0/test_alter_uniq_null.groovy 
b/regression-test/suites/schema_change_p0/test_alter_uniq_null.groovy
index 5db97e55042..fc3fd7fa005 100644
--- a/regression-test/suites/schema_change_p0/test_alter_uniq_null.groovy
+++ b/regression-test/suites/schema_change_p0/test_alter_uniq_null.groovy
@@ -51,7 +51,7 @@ suite("test_alter_uniq_null") {
 
     sql """alter table ${tableName} modify column `v2` INT NULL"""
     sleep(10)
-    max_try_num = 1000
+    def max_try_num = 1000
     while (max_try_num--) {
         String res = getJobState(tableName)
         if (res == "FINISHED" || res == "CANCELLED") {
diff --git 
a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
index 52814b312f2..e4eaedb500e 100644
--- a/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_keys_schema_change.groovy
@@ -143,9 +143,9 @@ suite ("test_dup_keys_schema_change") {
         String[][] tablets = sql """ show tablets from ${tableName}; """
         for (String[] tablet in tablets) {
                 String tablet_id = tablet[0]
-                backend_id = tablet[2]
+                def backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -156,8 +156,8 @@ suite ("test_dup_keys_schema_change") {
                 do {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
-                    backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def backend_id = tablet[2]
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
index 6ce9812e130..34c3fd505e0 100644
--- a/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_mv_schema_change.groovy
@@ -155,7 +155,7 @@ suite ("test_dup_mv_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -167,7 +167,7 @@ suite ("test_dup_mv_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
index ad29f7d2277..fa7a30ba570 100644
--- 
a/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_dup_rollup_schema_change.groovy
@@ -126,7 +126,7 @@ suite ("test_dup_rollup_schema_change") {
         sql """
             ALTER TABLE ${tableName} DROP COLUMN sex
             """
-        max_try_time = 3000
+        def max_try_time = 3000
         while (max_try_time--){
             String result = getJobState(tableName)
             if (result == "FINISHED") {
@@ -173,7 +173,7 @@ suite ("test_dup_rollup_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -185,7 +185,7 @@ suite ("test_dup_rollup_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_dup_schema_key_change_modify1.groovy
 
b/regression-test/suites/schema_change_p0/test_dup_schema_key_change_modify1.groovy
index 74a5c1f86d8..4683b735faa 100644
--- 
a/regression-test/suites/schema_change_p0/test_dup_schema_key_change_modify1.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_dup_schema_key_change_modify1.groovy
@@ -83,7 +83,7 @@ suite("test_dup_schema_key_change_modify1","p0") {
              "               (789012345, 'Grace', 2123483141, 'Xian', 29, 0, 
13333333333, 'No. 222 Street, Xian', '2022-07-07 22:00:00');"
 
      //TODO Test the dup model by modify a key type from LARGEINT  to BOOLEAN
-     errorMessage = "errCode = 2, detailMessage = Can not change LARGEINT to 
BOOLEAN"
+     def errorMessage = "errCode = 2, detailMessage = Can not change LARGEINT 
to BOOLEAN"
      expectException({
           sql initTable
           sql initTableData
@@ -218,7 +218,7 @@ suite("test_dup_schema_key_change_modify1","p0") {
      }, insertSql, false, "${tbName1}")
 
      sql """ DROP TABLE IF EXISTS ${tbName2} """
-     initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" +
+     def initTable2 = " CREATE TABLE IF NOT EXISTS ${tbName2}\n" +
              "          (\n" +
              "              `user_id` LARGEINT NOT NULL COMMENT \"用户id\",\n" +
              "              `username` VARCHAR(50) NOT NULL COMMENT 
\"用户昵称\",\n" +
@@ -236,7 +236,7 @@ suite("test_dup_schema_key_change_modify1","p0") {
              "          \"replication_allocation\" = \"tag.location.default: 
1\"\n" +
              "          );"
 
-     initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 
2147483641, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', 
'2022-01-01 10:00:00')," +
+     def initTableData2 = "insert into ${tbName2} values(123456789, 'Alice', 
2147483641, 'Beijing', 25, 0, 13812345678, 'No. 123 Street, Beijing', 
'2022-01-01 10:00:00')," +
              "               (234567890, 'Bob', 214748364, 'Shanghai', 30, 1, 
13998765432, 'No. 456 Street, Shanghai', '2022-02-02 12:00:00')," +
              "               (345678901, 'Carol', 2147483441, 'Guangzhou', 28, 
0, 13724681357, 'No. 789 Street, Guangzhou', '2022-03-03 14:00:00')," +
              "               (456789012, 'Dave', 2147483141, 'Shenzhen', 35, 
1, 13680864279, 'No. 987 Street, Shenzhen', '2022-04-04 16:00:00')," +
diff --git 
a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
index ca7c0f598d8..7eb43c4c44a 100644
--- a/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_dup_vals_schema_change.groovy
@@ -128,7 +128,7 @@ suite ("test_dup_vals_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -140,7 +140,7 @@ suite ("test_dup_vals_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
 
b/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
index 0c09af5e5de..9e4ad09842d 100644
--- 
a/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_enable_light_schema_change.groovy
@@ -76,7 +76,7 @@ suite("test_enable_light_schema_change", "p0") {
 
     sql """ alter table ${tableName1} order by (k1, k2, k4, k3) """
 
-    max_try_num = 60
+    def max_try_num = 60
     while (max_try_num--) {
         String res = getJobState(tableName1)
         if (res == "FINISHED" || res == "CANCELLED") {
diff --git 
a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
index e06c27b8abf..92f9f5267ba 100644
--- 
a/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_uniq_keys_schema_change.groovy
@@ -125,7 +125,7 @@ suite ("test_uniq_keys_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -137,7 +137,7 @@ suite ("test_uniq_keys_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
index 6167b757d09..fd564d117d7 100644
--- a/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_uniq_mv_schema_change.groovy
@@ -170,7 +170,7 @@ suite ("test_uniq_mv_schema_change") {
             String tablet_id = tablet[0]
             backend_id = tablet[2]
             logger.info("run compaction:" + tablet_id)
-            (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+            def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
             logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
             //assertEquals(code, 0)
     }
@@ -182,7 +182,7 @@ suite ("test_uniq_mv_schema_change") {
                 Thread.sleep(100)
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
-                (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
                 assertEquals(code, 0)
                 def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
index 515d1ee9151..2cd17c6f5e5 100644
--- 
a/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_uniq_rollup_schema_change.groovy
@@ -178,7 +178,7 @@ suite ("test_uniq_rollup_schema_change") {
             String tablet_id = tablet[0]
             backend_id = tablet[2]
             logger.info("run compaction:" + tablet_id)
-            (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+            def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
             logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
             //assertEquals(code, 0)
     }
@@ -190,7 +190,7 @@ suite ("test_uniq_rollup_schema_change") {
                 Thread.sleep(100)
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
-                (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
                 assertEquals(code, 0)
                 def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
index 78d97b539c3..631072a7f0b 100644
--- 
a/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
+++ 
b/regression-test/suites/schema_change_p0/test_uniq_vals_schema_change.groovy
@@ -64,7 +64,7 @@ suite ("test_uniq_vals_schema_change") {
                 (2, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', '2020-01-03', 1, 32, 20)
             """
 
-        qt_sc"""
+        qt_sc """
                         select count(*) from ${tableName}
                         """
 
@@ -98,7 +98,7 @@ suite ("test_uniq_vals_schema_change") {
         sql """
             ALTER TABLE ${tableName} DROP COLUMN last_visit_date
             """
-        qt_sc = sql """ select * from ${tableName} where user_id = 3 """
+        qt_sc """ select * from ${tableName} where user_id = 3 """
 
         sql """ INSERT INTO ${tableName} VALUES
                 (4, '2017-10-01', 'Beijing', 10, 1, '2020-01-03', 
'2020-01-03', 1, 32, 20, 2)
@@ -132,7 +132,7 @@ suite ("test_uniq_vals_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
                 //assertEquals(code, 0)
         }
@@ -144,7 +144,7 @@ suite ("test_uniq_vals_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git 
a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy 
b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
index 38bd996e89d..77f85cf2c89 100644
--- a/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
+++ b/regression-test/suites/schema_change_p0/test_varchar_schema_change.groovy
@@ -121,7 +121,7 @@ suite ("test_varchar_schema_change") {
                 String tablet_id = tablet[0]
                 backend_id = tablet[2]
                 logger.info("run compaction:" + tablet_id)
-                (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                def (code, out, err) = 
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                 logger.info("Run compaction: code=" + code + ", out=" + out + 
", err=" + err)
         }
 
@@ -132,7 +132,7 @@ suite ("test_varchar_schema_change") {
                     Thread.sleep(100)
                     String tablet_id = tablet[0]
                     backend_id = tablet[2]
-                    (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
+                    def (code, out, err) = 
be_get_compaction_status(backendId_to_backendIP.get(backend_id), 
backendId_to_backendHttpPort.get(backend_id), tablet_id)
                     logger.info("Get compaction status: code=" + code + ", 
out=" + out + ", err=" + err)
                     assertEquals(code, 0)
                     def compactionStatus = parseJson(out.trim())
diff --git a/regression-test/suites/show_p0/test_show_data.groovy 
b/regression-test/suites/show_p0/test_show_data.groovy
index 10714a542d4..20adc299d55 100644
--- a/regression-test/suites/show_p0/test_show_data.groovy
+++ b/regression-test/suites/show_p0/test_show_data.groovy
@@ -22,7 +22,7 @@ suite("test_show_data") {
 
     def jdbcUrlWithoutDbStr = 
(context.config.jdbcUrl).split(context.config.defaultDb)
     logger.info("jdbcUrlWithoutDbStr:${jdbcUrlWithoutDbStr}");
-    def result2 = connect(context.config.jdbcUser, password = 
context.config.jdbcPassword, url = jdbcUrlWithoutDbStr[0]) {
+    def result2 = connect(context.config.jdbcUser, 
context.config.jdbcPassword, jdbcUrlWithoutDbStr[0]) {
         sql """show data;"""
     }
 
diff --git a/regression-test/suites/statistics/analyze_stats.groovy 
b/regression-test/suites/statistics/analyze_stats.groovy
index c518a15d7dd..7f4b9abee47 100644
--- a/regression-test/suites/statistics/analyze_stats.groovy
+++ b/regression-test/suites/statistics/analyze_stats.groovy
@@ -364,7 +364,7 @@ suite("test_analyze") {
         ANALYZE TABLE analyze_partitioned_tbl_test WITH SYNC
     """
 
-    part_tbl_analyze_result = sql """
+    def part_tbl_analyze_result = sql """
         SHOW COLUMN CACHED STATS analyze_partitioned_tbl_test(col1)
     """
 
@@ -1013,7 +1013,7 @@ PARTITION `p599` VALUES IN (599)
     sql """ANALYZE TABLE test_600_partition_table_analyze WITH SYNC"""
 
     //  0:column_name | 1:index_name | 2:count | 3:ndv  | 4:num_null | 
5:data_size | 6:avg_size_byte | 7:min  | 8:max  | 9:method | 10:type | 
11:trigger | 12:query_times | 13:updated_time
-    id_col_stats = sql """
+    def id_col_stats = sql """
         SHOW COLUMN CACHED STATS test_600_partition_table_analyze(id);
     """
 
@@ -1237,7 +1237,7 @@ PARTITION `p599` VALUES IN (599)
 
     def check_column = { r, expected ->
         expected_result = convert_col_list_str_to_java_collection(expected)
-        actual_result = convert_col_list_str_to_java_collection(r[0][4])
+        def actual_result = convert_col_list_str_to_java_collection(r[0][4])
         System.out.println(expected_result)
         System.out.println(actual_result)
         return expected_result.containsAll(actual_result) && actual_result.containsAll(expected_result)
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign_with_conflict.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign_with_conflict.groovy
index 7e2cd9cdfe3..ec03ac336e7 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign_with_conflict.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_delete_sign_with_conflict.groovy
@@ -65,9 +65,9 @@ suite("test_partial_update_delete_sign_with_conflict") {
         log.info("http_stream execute 2pc: ${command}")
 
         def process = command.execute()
-        code = process.waitFor()
-        out = process.text
-        json2pc = parseJson(out)
+        def code = process.waitFor()
+        def out = process.text
+        def json2pc = parseJson(out)
         log.info("http_stream 2pc result: ${out}".toString())
         assertEquals(code, 0)
         assertEquals("success", json2pc.status.toLowerCase())
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_parallel.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_parallel.groovy
index 5b69f9096f9..3a254ad8bf0 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_parallel.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_parallel.groovy
@@ -38,7 +38,7 @@ suite("test_primary_key_partial_update_parallel", "p0") {
         (4, "doris4", 4000, 423, 4),
         (3, "doris3", 3000, 323, 3);"""
 
-    t1 = Thread.startDaemon {
+    def t1 = Thread.startDaemon {
         streamLoad {
             table "${tableName}"
 
@@ -52,7 +52,7 @@ suite("test_primary_key_partial_update_parallel", "p0") {
         }
     }
 
-    t2 = Thread.startDaemon {
+    def t2 = Thread.startDaemon {
         streamLoad {
             table "${tableName}"
 
@@ -66,7 +66,7 @@ suite("test_primary_key_partial_update_parallel", "p0") {
         }
     }
 
-    t3 = Thread.startDaemon {
+    def t3 = Thread.startDaemon {
         streamLoad {
             table "${tableName}"
 
diff --git a/regression-test/suites/update/test_update_configs.groovy b/regression-test/suites/update/test_update_configs.groovy
index cbce43293c1..6032682103c 100644
--- a/regression-test/suites/update/test_update_configs.groovy
+++ b/regression-test/suites/update/test_update_configs.groovy
@@ -49,7 +49,7 @@ suite("test_update_configs", "p0") {
     (code, out, err) = show_be_config(beIp, bePort)
     logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
     assertEquals(code, 0)
-    configList2 = parseJson(out.trim())
+    def configList2 = parseJson(out.trim())
     assert configList instanceof List
     for (Object ele in (List) configList2) {
         assert ele instanceof List<String>
diff --git a/regression-test/suites/variant_p0/delete_update.groovy b/regression-test/suites/variant_p0/delete_update.groovy
index 2b126b4c3a6..85a6f3c9a26 100644
--- a/regression-test/suites/variant_p0/delete_update.groovy
+++ b/regression-test/suites/variant_p0/delete_update.groovy
@@ -72,19 +72,19 @@ suite("regression_test_variant_delete_and_update", "variant_type"){
     // delete & insert concurrently
     sql "set enable_unique_key_partial_update=true;"
     sql "sync"
-    t1 = Thread.startDaemon {
+    def t1 = Thread.startDaemon {
         for (int k = 1; k <= 60; k++) {
             int x = new Random().nextInt(61) % 10;
             sql """insert into ${table_name}(k,vs) values(${x}, '{"k${x}" : 
${x}}'),(${x+1}, '{"k${x+1}" : ${x+1}}'),(${x+2}, '{"k${x+2}" : 
${x+2}}'),(${x+3}, '{"k${x+3}" : ${x+3}}')"""
         } 
     }
-    t2 = Thread.startDaemon {
+    def t2 = Thread.startDaemon {
         for (int k = 1; k <= 60; k++) {
             int x = new Random().nextInt(61) % 10;
             sql """insert into ${table_name}(k,v) values(${x}, '{"k${x}" : 
${x}}'),(${x+1}, '{"k${x+1}" : ${x+1}}'),(${x+2}, '{"k${x+2}" : 
${x+2}}'),(${x+3}, '{"k${x+3}" : ${x+3}}')"""
         } 
     }
-    t3 = Thread.startDaemon {
+    def t3 = Thread.startDaemon {
         for (int k = 1; k <= 60; k++) {
             int x = new Random().nextInt(61) % 10;
             sql """insert into ${table_name}(k,v) values(${x}, '{"k${x}" : 
${x}}'),(${x+1}, '{"k${x+1}" : ${x+1}}'),(${x+2}, '{"k${x+2}" : 
${x+2}}'),(${x+3}, '{"k${x+3}" : ${x+3}}')"""
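
This suite and test_partial_update_parallel.groovy above share the same shape: several daemon threads started from one script, where a shared t1 in the Binding would let one concurrently running suite overwrite another's thread handle. A minimal sketch of the fixed pattern (plain Groovy, no framework helpers; the loop bodies are placeholders):

    def t1 = Thread.startDaemon { 100.times { /* concurrent writer A */ } }
    def t2 = Thread.startDaemon { 100.times { /* concurrent writer B */ } }
    [t1, t2]*.join()   // wait for both daemon threads before asserting results
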
diff --git a/regression-test/suites/variant_p0/schema_change/schema_change.groovy b/regression-test/suites/variant_p0/schema_change/schema_change.groovy
index 42cef32c8e5..877874b7e03 100644
--- a/regression-test/suites/variant_p0/schema_change/schema_change.groovy
+++ b/regression-test/suites/variant_p0/schema_change/schema_change.groovy
@@ -32,7 +32,7 @@ suite("regression_test_variant_schema_change", "variant_type"){
     def useTime = 0
     def wait_for_latest_op_on_table_finish = { tableName, OpTimeout ->
         for(int t = delta_time; t <= OpTimeout; t += delta_time){
-            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${tableName}" ORDER BY CreateTime DESC LIMIT 1;"""
+            def alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${tableName}" ORDER BY CreateTime DESC LIMIT 1;"""
             alter_res = alter_res.toString()
             if(alter_res.contains("FINISHED")) {
                 sleep(3000) // wait change table state to normal
diff --git a/regression-test/suites/workload_manager_p0/test_resource_tag.groovy b/regression-test/suites/workload_manager_p0/test_resource_tag.groovy
index 529aba390b7..fa7ba680143 100644
--- a/regression-test/suites/workload_manager_p0/test_resource_tag.groovy
+++ b/regression-test/suites/workload_manager_p0/test_resource_tag.groovy
@@ -76,8 +76,8 @@ suite("test_resource_tag") {
         def test_failed_command = "curl --location-trusted -u test_rg: -H column_separator:| -H Transfer-Encoding:chunked -H columns:k1,k2  -T ${context.dataPath}/skip_rg_test_table.csv http://${context.config.feHttpAddress}/api/${context.config.defaultDb}/skip_rg_test_table/_stream_load";
         log.info("stream load skip_rg_test_table failed test cmd: ${test_failed_command}")
         def process = test_failed_command.execute()
-        code1 = process.waitFor()
-        out1 = process.text
+        def code1 = process.waitFor()
+        def out1 = process.text
         log.info("stream load skip_rg_test_table failed test result, 
${out1}".toString())
         assertTrue("${out1}".toString().contains("No backend load available") 
|| "${out1}".toString().contains("No available backends"))
 
@@ -85,9 +85,9 @@ suite("test_resource_tag") {
 
         def test_succ_command = "curl --location-trusted -u test_rg: -H column_separator:| -H Transfer-Encoding:chunked -H columns:k1,k2  -T ${context.dataPath}/skip_rg_test_table.csv http://${context.config.feHttpAddress}/api/${context.config.defaultDb}/skip_rg_test_table/_stream_load";
         def process2 = test_succ_command.execute()
-        code2 = process2.waitFor()
-        out2 = process2.text
-        jsonRet = parseJson(out2)
+        def code2 = process2.waitFor()
+        def out2 = process2.text
+        def jsonRet = parseJson(out2)
         log.info("stream load skip_rg_test_table succ test result, 
${out2}".toString())
         assertFalse("${out2}".toString().contains("No backend load available"))
         assertTrue(jsonRet['Status'] == 'Success')
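
The execute()/waitFor()/text triple seen here (and in the 2pc hunk earlier) is plain Groovy process handling; only parseJson is a framework helper. A self-contained sketch of the same pattern, with JsonSlurper standing in for parseJson and echo as a stand-in command (assumes a POSIX host):

    import groovy.json.JsonSlurper

    def process = ["echo", '{"Status": "Success"}'].execute()
    def out = process.text           // capture stdout (read before blocking on exit)
    def code = process.waitFor()     // exit status of the child process
    def json = new JsonSlurper().parseText(out)
    assert code == 0 && json.Status == 'Success'
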

