This is an automated email from the ASF dual-hosted git repository.

lijibing pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 624bbff91c7 [regression](mtmv)Support show iceberg external table partition. Add more test case for iceberg mtmv. (#46257)
624bbff91c7 is described below

commit 624bbff91c756dbb8ade7fc9e13c2f664aad6553
Author: James <lijib...@selectdb.com>
AuthorDate: Mon Jan 6 15:54:27 2025 +0800

    [regression](mtmv)Support show iceberg external table partition. Add more test case for iceberg mtmv. (#46257)
    
    ### What problem does this PR solve?
    
    Support showing partitions of Iceberg external tables.
    Iceberg partitions are converted to Doris range partitions in
    IcebergExternalTable. This PR adds a SHOW PARTITIONS implementation for
    IcebergExternalTable, which makes it possible to add regression tests.
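
    For example, with the catalog and tables created by the regression test
    in this PR (a minimal usage sketch; the exact bounds depend on the test
    data):

        use iceberg_mtmv_catalog_hms.test_db;
        show partitions from replace_partition2;
        -- Each row has three columns: Partition, Lower Bound, Upper Bound,
        -- e.g. ts_month=657 with bounds 2024-10-01 00:00:00 / 2024-11-01 00:00:00.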
    
    Issue Number: close #xxx
    
    Related PR: #xxx
    
    Problem Summary:
    
    ### Release note
    
    None
---
 .../create_preinstalled_scripts/iceberg/run08.sql  | 106 +++++++++++++++++++++
 .../apache/doris/analysis/ShowPartitionsStmt.java  |  18 +++-
 .../datasource/iceberg/IcebergExternalTable.java   |   2 +-
 .../java/org/apache/doris/qe/ShowExecutor.java     |  40 ++++++++
 regression-test/data/mtmv_p0/test_iceberg_mtmv.out |  19 ++++
 .../suites/mtmv_p0/test_iceberg_mtmv.groovy        |  67 ++++++++++---
 6 files changed, 237 insertions(+), 15 deletions(-)

diff --git a/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run08.sql b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run08.sql
new file mode 100644
index 00000000000..aa573e0af1e
--- /dev/null
+++ b/docker/thirdparties/docker-compose/iceberg/scripts/create_preinstalled_scripts/iceberg/run08.sql
@@ -0,0 +1,106 @@
+
+use demo.test_db;
+CREATE TABLE no_partition (
+    id INT,
+    name STRING,
+    create_date DATE
+) USING iceberg;
+INSERT INTO no_partition VALUES(1, 'Alice', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02');
+
+CREATE TABLE not_support_trans (
+    id INT,
+    name STRING,
+    create_date DATE
+) USING iceberg
+PARTITIONED BY (bucket(10, create_date));
+INSERT INTO not_support_trans VALUES(1, 'Alice', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02');
+
+CREATE TABLE add_partition1 (
+    id INT,
+    name STRING,
+    create_date DATE
+) USING iceberg;
+INSERT INTO add_partition1 VALUES(1, 'Alice', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02');
+ALTER TABLE add_partition1 ADD PARTITION FIELD month(create_date);
+INSERT INTO add_partition1 VALUES(3, 'Lara', DATE '2023-12-03');
+
+CREATE TABLE add_partition2 (
+    id INT,
+    name STRING,
+    create_date1 DATE,
+    create_date2 DATE
+) USING iceberg
+PARTITIONED BY (month(create_date1));
+INSERT INTO add_partition2 VALUES(1, 'Alice', DATE '2023-12-01', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02', DATE '2023-12-02');
+ALTER TABLE add_partition2 ADD PARTITION FIELD year(create_date2);
+INSERT INTO add_partition2 VALUES(3, 'Lara', DATE '2023-12-03', DATE '2023-12-03');
+
+CREATE TABLE drop_partition1 (
+    id INT,
+    name STRING,
+    create_date DATE
+) USING iceberg
+PARTITIONED BY (month(create_date));
+INSERT INTO drop_partition1 VALUES(1, 'Alice', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02');
+ALTER TABLE drop_partition1 DROP PARTITION FIELD month(create_date);
+
+CREATE TABLE drop_partition2 (
+    id INT,
+    name STRING,
+    create_date1 DATE,
+    create_date2 DATE
+) USING iceberg
+PARTITIONED BY (month(create_date1), year(create_date2));
+INSERT INTO drop_partition2 VALUES(1, 'Alice', DATE '2023-12-01', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02', DATE '2023-12-02');
+ALTER TABLE drop_partition2 DROP PARTITION FIELD year(create_date2);
+INSERT INTO drop_partition2 VALUES(3, 'Lara', DATE '2023-12-03', DATE '2023-12-03');
+
+CREATE TABLE replace_partition1 (
+    id INT,
+    name STRING,
+    create_date1 DATE,
+    create_date2 DATE
+) USING iceberg
+PARTITIONED BY (month(create_date1));
+INSERT INTO replace_partition1 VALUES(1, 'Alice', DATE '2023-12-01', DATE '2023-12-01'),(2, 'Bob', DATE '2023-12-02', DATE '2023-12-02');
+ALTER TABLE replace_partition1 REPLACE PARTITION FIELD month(create_date1) WITH year(create_date2);
+INSERT INTO replace_partition1 VALUES(3, 'Lara', DATE '2023-12-03', DATE '2023-12-03');
+
+CREATE TABLE replace_partition2(
+  ts TIMESTAMP COMMENT 'ts',
+  value INT COMMENT 'col1')
+USING iceberg
+PARTITIONED BY (month(ts));
+insert into replace_partition2 values (to_timestamp('2024-10-26 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 1), (to_timestamp('2024-11-27 21:02:03', 'yyyy-MM-dd HH:mm:ss'), 2);
+ALTER TABLE replace_partition2 REPLACE PARTITION FIELD ts_month WITH day(ts);
+insert into replace_partition2 values (to_timestamp('2024-12-03 14:02:03', 'yyyy-MM-dd HH:mm:ss'), 3);
+
+CREATE TABLE replace_partition3(
+  ts TIMESTAMP COMMENT 'ts',
+  value INT COMMENT 'col1')
+USING iceberg
+PARTITIONED BY (month(ts));
+insert into replace_partition3 values (to_timestamp('2024-11-26 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 1);
+ALTER TABLE replace_partition3 REPLACE PARTITION FIELD month(ts) WITH day(ts);
+insert into replace_partition3 values (to_timestamp('2024-11-02 21:02:03', 'yyyy-MM-dd HH:mm:ss'), 2), (to_timestamp('2024-11-03 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 3), (to_timestamp('2024-12-02 19:02:03', 'yyyy-MM-dd HH:mm:ss'), 4);
+
+CREATE TABLE replace_partition4(
+  ts TIMESTAMP COMMENT 'ts',
+  value INT COMMENT 'col1')
+USING iceberg
+PARTITIONED BY (month(ts));
+insert into replace_partition4 values (to_timestamp('2024-10-26 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 1), (to_timestamp('2024-11-26 21:02:03', 'yyyy-MM-dd HH:mm:ss'), 2);
+ALTER TABLE replace_partition4 REPLACE PARTITION FIELD month(ts) WITH day(ts);
+insert into replace_partition4 values (to_timestamp('2024-11-02 13:02:03', 'yyyy-MM-dd HH:mm:ss'), 3), (to_timestamp('2024-11-03 10:02:03', 'yyyy-MM-dd HH:mm:ss'), 4);
+
+CREATE TABLE replace_partition5(
+  ts TIMESTAMP COMMENT 'ts',
+  value INT COMMENT 'col1')
+USING iceberg
+PARTITIONED BY (month(ts));
+insert into replace_partition5 values (to_timestamp('2024-10-26 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 1), (to_timestamp('2024-11-26 13:02:03', 'yyyy-MM-dd HH:mm:ss'), 2);
+ALTER TABLE replace_partition5 REPLACE PARTITION FIELD month(ts) WITH day(ts);
+insert into replace_partition5 values (to_timestamp('2024-10-12 09:02:03', 'yyyy-MM-dd HH:mm:ss'), 3), (to_timestamp('2024-12-21 21:02:03', 'yyyy-MM-dd HH:mm:ss'), 4);
+ALTER TABLE replace_partition5 REPLACE PARTITION FIELD day(ts) WITH hour(ts);
+insert into replace_partition5 values (to_timestamp('2024-12-21 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 5);
+insert into replace_partition5 values (to_timestamp('2025-01-01 11:02:03', 'yyyy-MM-dd HH:mm:ss'), 6);
diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
index 0be41ef60fa..14a00184c75 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ShowPartitionsStmt.java
@@ -37,6 +37,8 @@ import org.apache.doris.common.util.OrderByPair;
 import org.apache.doris.datasource.CatalogIf;
 import org.apache.doris.datasource.hive.HMSExternalCatalog;
 import org.apache.doris.datasource.hive.HMSExternalTable;
+import org.apache.doris.datasource.iceberg.IcebergExternalCatalog;
+import org.apache.doris.datasource.iceberg.IcebergExternalTable;
 import org.apache.doris.datasource.maxcompute.MaxComputeExternalCatalog;
 import org.apache.doris.datasource.maxcompute.MaxComputeExternalTable;
 import org.apache.doris.mysql.privilege.PrivPredicate;
@@ -128,7 +130,8 @@ public class ShowPartitionsStmt extends ShowStmt implements NotFallbackInParser
 
         DatabaseIf db = catalog.getDbOrAnalysisException(dbName);
         TableIf table = db.getTableOrMetaException(tblName, Table.TableType.OLAP,
-                    TableType.HMS_EXTERNAL_TABLE, TableType.MAX_COMPUTE_EXTERNAL_TABLE);
+                    TableType.HMS_EXTERNAL_TABLE, TableType.MAX_COMPUTE_EXTERNAL_TABLE,
+                    TableType.ICEBERG_EXTERNAL_TABLE);
 
         if (table instanceof HMSExternalTable) {
             if (((HMSExternalTable) table).isView()) {
@@ -147,6 +150,13 @@ public class ShowPartitionsStmt extends ShowStmt implements NotFallbackInParser
             return;
         }
 
+        if (table instanceof IcebergExternalTable) {
+            if (!((IcebergExternalTable) table).isValidRelatedTable()) {
+                throw new AnalysisException("Table " + tblName + " is not a supported partition table");
+            }
+            return;
+        }
+
         table.readLock();
         try {
             // build proc path
@@ -180,7 +190,7 @@ public class ShowPartitionsStmt extends ShowStmt implements NotFallbackInParser
 
         // disallow unsupported catalog
         if (!(catalog.isInternalCatalog() || catalog instanceof HMSExternalCatalog
-                || catalog instanceof MaxComputeExternalCatalog)) {
+                || catalog instanceof MaxComputeExternalCatalog || catalog instanceof IcebergExternalCatalog)) {
             throw new AnalysisException(String.format("Catalog of type '%s' is not allowed in ShowPartitionsStmt",
                 catalog.getType()));
         }
@@ -287,6 +297,10 @@ public class ShowPartitionsStmt extends ShowStmt implements NotFallbackInParser
             for (String col : result.getColumnNames()) {
                 builder.addColumn(new Column(col, ScalarType.createVarchar(30)));
             }
+        } else if (catalog instanceof IcebergExternalCatalog) {
+            builder.addColumn(new Column("Partition", 
ScalarType.createVarchar(60)));
+            builder.addColumn(new Column("Lower Bound", 
ScalarType.createVarchar(100)));
+            builder.addColumn(new Column("Upper Bound", 
ScalarType.createVarchar(100)));
         } else {
             builder.addColumn(new Column("Partition", 
ScalarType.createVarchar(60)));
         }
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
index 713ec94fd1b..2feab480d7e 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalTable.java
@@ -556,7 +556,7 @@ public class IcebergExternalTable extends ExternalTable implements MTMVRelatedTa
         this.isValidRelatedTableCached = isCached;
     }
 
-    private IcebergSnapshotCacheValue getOrFetchSnapshotCacheValue(Optional<MvccSnapshot> snapshot) {
+    public IcebergSnapshotCacheValue getOrFetchSnapshotCacheValue(Optional<MvccSnapshot> snapshot) {
         if (snapshot.isPresent()) {
             return ((IcebergMvccSnapshot) snapshot.get()).getSnapshotCacheValue();
         } else {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
index babfe4e2265..e453119d6a1 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/qe/ShowExecutor.java
@@ -140,6 +140,8 @@ import org.apache.doris.catalog.MaterializedIndexMeta;
 import org.apache.doris.catalog.MetadataViewer;
 import org.apache.doris.catalog.OlapTable;
 import org.apache.doris.catalog.Partition;
+import org.apache.doris.catalog.PartitionItem;
+import org.apache.doris.catalog.PartitionKey;
 import org.apache.doris.catalog.PrimitiveType;
 import org.apache.doris.catalog.Replica;
 import org.apache.doris.catalog.ReplicaAllocation;
@@ -202,6 +204,7 @@ import org.apache.doris.datasource.hive.HMSExternalTable;
 import org.apache.doris.datasource.hive.HiveMetaStoreClientHelper;
 import org.apache.doris.datasource.iceberg.IcebergExternalCatalog;
 import org.apache.doris.datasource.iceberg.IcebergExternalDatabase;
+import org.apache.doris.datasource.iceberg.IcebergExternalTable;
 import org.apache.doris.datasource.maxcompute.MaxComputeExternalCatalog;
 import org.apache.doris.job.manager.JobManager;
 import org.apache.doris.load.DeleteHandler;
@@ -254,6 +257,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
 import com.google.common.collect.ImmutableSetMultimap;
 import com.google.common.collect.Lists;
+import com.google.common.collect.Range;
 import com.google.common.collect.Sets;
 import com.google.gson.GsonBuilder;
 import org.apache.commons.collections.CollectionUtils;
@@ -1898,6 +1902,8 @@ public class ShowExecutor {
             resultSet = new ShowResultSet(showStmt.getMetaData(), rows);
         } else if (showStmt.getCatalog() instanceof MaxComputeExternalCatalog) {
             handleShowMaxComputeTablePartitions(showStmt);
+        } else if (showStmt.getCatalog() instanceof IcebergExternalCatalog) {
+            handleShowIcebergTablePartitions(showStmt);
         } else {
             handleShowHMSTablePartitions(showStmt);
         }
@@ -1981,6 +1987,40 @@ public class ShowExecutor {
         resultSet = new ShowResultSet(showStmt.getMetaData(), rows);
     }
 
+    private void handleShowIcebergTablePartitions(ShowPartitionsStmt showStmt) {
+        IcebergExternalCatalog catalog = (IcebergExternalCatalog) showStmt.getCatalog();
+        String db = showStmt.getTableName().getDb();
+        String tbl = showStmt.getTableName().getTbl();
+        IcebergExternalTable icebergTable = (IcebergExternalTable) catalog.getDb(db).get().getTable(tbl).get();
+        LimitElement limit = showStmt.getLimitElement();
+        List<OrderByPair> orderByPairs = showStmt.getOrderByPairs();
+        Map<String, PartitionItem> partitions = icebergTable.getAndCopyPartitionItems(Optional.empty());
+        List<List<String>> rows = new ArrayList<>();
+        for (Map.Entry<String, PartitionItem> entry : partitions.entrySet()) {
+            List<String> row = new ArrayList<>();
+            Range<PartitionKey> items = entry.getValue().getItems();
+            row.add(entry.getKey());
+            row.add(items.lowerEndpoint().toString());
+            row.add(items.upperEndpoint().toString());
+            rows.add(row);
+        }
+        // sort by partition name
+        if (orderByPairs != null && orderByPairs.get(0).isDesc()) {
+            rows.sort(Comparator.comparing(x -> x.get(0), Comparator.reverseOrder()));
+        } else {
+            rows.sort(Comparator.comparing(x -> x.get(0)));
+        }
+        if (limit != null && limit.hasLimit()) {
+            int beginIndex = (int) limit.getOffset();
+            int endIndex = (int) (beginIndex + limit.getLimit());
+            if (endIndex > rows.size()) {
+                endIndex = rows.size();
+            }
+            rows = rows.subList(beginIndex, endIndex);
+        }
+        resultSet = new ShowResultSet(showStmt.getMetaData(), rows);
+    }
+
     private void handleShowTablet() throws AnalysisException {
         ShowTabletStmt showStmt = (ShowTabletStmt) stmt;
         List<List<String>> rows = Lists.newArrayList();
diff --git a/regression-test/data/mtmv_p0/test_iceberg_mtmv.out b/regression-test/data/mtmv_p0/test_iceberg_mtmv.out
index 483ac0957e6..433cc85d332 100644
--- a/regression-test/data/mtmv_p0/test_iceberg_mtmv.out
+++ b/regression-test/data/mtmv_p0/test_iceberg_mtmv.out
@@ -118,3 +118,22 @@
 2024-01-01T00:00       4
 2024-01-02T00:00       3
 
+-- !evolution2 --
+ts_day=20060   types: [DATETIMEV2]; keys: [2024-12-03 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-04 00:00:00];
+ts_month=657   types: [DATETIMEV2]; keys: [2024-10-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];
+ts_month=658   types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-01 00:00:00];
+
+-- !evolution3 --
+ts_day=20059   types: [DATETIMEV2]; keys: [2024-12-02 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-03 00:00:00];
+ts_month=658   types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-01 00:00:00];
+
+-- !evolution4 --
+ts_month=657   types: [DATETIMEV2]; keys: [2024-10-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];
+ts_month=658   types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-01 00:00:00];
+
+-- !evolution5 --
+ts_day=20078   types: [DATETIMEV2]; keys: [2024-12-21 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-22 00:00:00];
+ts_hour=482139 types: [DATETIMEV2]; keys: [2025-01-01 03:00:00];       types: [DATETIMEV2]; keys: [2025-01-01 04:00:00];
+ts_month=657   types: [DATETIMEV2]; keys: [2024-10-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];
+ts_month=658   types: [DATETIMEV2]; keys: [2024-11-01 00:00:00];       types: [DATETIMEV2]; keys: [2024-12-01 00:00:00];
+
diff --git a/regression-test/suites/mtmv_p0/test_iceberg_mtmv.groovy b/regression-test/suites/mtmv_p0/test_iceberg_mtmv.groovy
index aee80d8d169..36c0d3f120e 100644
--- a/regression-test/suites/mtmv_p0/test_iceberg_mtmv.groovy
+++ b/regression-test/suites/mtmv_p0/test_iceberg_mtmv.groovy
@@ -69,12 +69,6 @@ suite("test_iceberg_mtmv", "p0,external,iceberg,external_docker,external_docker_
     // Test partition refresh.
     // Use hms catalog to avoid rest catalog fail to write caused by sqlite database file locked.
     if (enabled != null && enabled.equalsIgnoreCase("true")) {
-        String hivePrefix = "hive2";
-        String hms_port = context.config.otherConfigs.get(hivePrefix + "HmsPort")
-        String hdfs_port = context.config.otherConfigs.get(hivePrefix + "HdfsPort")
-        String default_fs = "hdfs://${externalEnvIp}:${hdfs_port}"
-        String warehouse = "${default_fs}/warehouse"
-
         String catalog_name = "iceberg_mtmv_catalog_hms";
         String mvUnpartition = "test_iceberg_unpartition"
         String mvName1 = "test_iceberg_mtmv_ts"
@@ -85,13 +79,14 @@ suite("test_iceberg_mtmv", "p0,external,iceberg,external_docker,external_docker_
         String icebergTable2 = "dtable"
         String icebergTable3 = "union_test"
         sql """drop catalog if exists ${catalog_name} """
-        sql """create catalog if not exists ${catalog_name} properties (
+        sql """CREATE CATALOG ${catalog_name} PROPERTIES (
             'type'='iceberg',
-            'iceberg.catalog.type'='hms',
-            'hive.metastore.uris' = 'thrift://${externalEnvIp}:${hms_port}',
-            'fs.defaultFS' = '${default_fs}',
-            'warehouse' = '${warehouse}',
-            'use_meta_cache' = 'true'
+            'iceberg.catalog.type'='rest',
+            'uri' = 'http://${externalEnvIp}:${rest_port}',
+            "s3.access_key" = "admin",
+            "s3.secret_key" = "password",
+            "s3.endpoint" = "http://${externalEnvIp}:${minio_port}";,
+            "s3.region" = "us-east-1"
         );"""
 
         sql """switch internal"""
@@ -266,6 +261,54 @@ suite("test_iceberg_mtmv", "p0,external,iceberg,external_docker,external_docker_
         sql """drop materialized view if exists ${mvName};"""
         sql """drop table if exists 
${catalog_name}.${icebergDb}.${icebergTable3}"""
 
+        sql """use ${catalog_name}.test_db"""
+        qt_evolution2 "show partitions from replace_partition2"
+        qt_evolution3 "show partitions from replace_partition3"
+        qt_evolution4 "show partitions from replace_partition4"
+        qt_evolution5 "show partitions from replace_partition5"
+
+        test {
+            sql "show partitions from replace_partition1"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from no_partition"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from not_support_trans"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from drop_partition1"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from drop_partition2"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from add_partition1"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
+        test {
+            sql "show partitions from add_partition2"
+            // check exception message contains
+            exception "is not a supported partition table"
+        }
+
         sql """ drop catalog if exists ${catalog_name} """
     }
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
