This is an automated email from the ASF dual-hosted git repository.

morningman pushed a commit to branch branch-1.2-lts
in repository https://gitbox.apache.org/repos/asf/doris.git

commit 0ebabee9b76d5dc94a58ece02af8c3e27692702e
Author: Mingyu Chen <morning...@163.com>
AuthorDate: Fri Mar 10 10:19:37 2023 +0800

    [fix](backup) backup throws NPE when no partition in table (#17546)
    
    If the table has no partitions, backup will report the following error:
    
    2023-03-06 17:35:32,971 ERROR (backupHandler|24) [Daemon.run():118] daemon thread got exception. name: backupHandler
    java.util.NoSuchElementException: No value present
            at java.util.Optional.get(Optional.java:135) ~[?:1.8.0_152]
            at org.apache.doris.catalog.OlapTable.selectiveCopy(OlapTable.java:1259) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.backup.BackupJob.prepareBackupMeta(BackupJob.java:505) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.backup.BackupJob.prepareAndSendSnapshotTask(BackupJob.java:398) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.backup.BackupJob.run(BackupJob.java:301) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.backup.BackupHandler.runAfterCatalogReady(BackupHandler.java:188) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.common.util.MasterDaemon.runOneCycle(MasterDaemon.java:58) ~[doris-fe.jar:1.0-SNAPSHOT]
            at org.apache.doris.common.util.Daemon.run(Daemon.java:116) ~[doris-fe.jar:1.0-SNAPSHOT]
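
The root cause is the unguarded Optional.get() on the result of findFirst() over an empty partition collection. A minimal, self-contained sketch of the failure mode and of the guard applied in this patch (plain String values stand in for Doris's Partition type; class and variable names here are illustrative only, not part of the patch):

```
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

public class EmptyPartitionSketch {
    public static void main(String[] args) {
        // Stand-in for copied.getPartitions() on a table that has no partitions.
        List<String> partitions = Collections.emptyList();

        // Before the fix: calling get() on the empty Optional throws
        // java.util.NoSuchElementException: No value present
        // String first = partitions.stream().findFirst().get();

        // After the fix: default to an empty list and dereference only when present.
        List<String> shadowIndex = new ArrayList<>();
        Optional<String> firstPartition = partitions.stream().findFirst();
        if (firstPartition.isPresent()) {
            shadowIndex.add(firstPartition.get());
        }
        System.out.println("shadow indexes to drop: " + shadowIndex.size());
    }
}
```

Run against an empty list, the sketch prints a count of 0 instead of throwing, which is the behavior the patch restores for tables created with an empty partition list.
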
---
 .../Backup-and-Restore/BACKUP.md                   | 59 ----------------------
 .../Backup-and-Restore/CREATE-REPOSITORY.md        | 18 ++++++-
 .../Backup-and-Restore/BACKUP.md                   | 59 ----------------------
 .../Backup-and-Restore/CREATE-REPOSITORY.md        | 18 ++++++-
 .../java/org/apache/doris/catalog/OlapTable.java   |  9 +++-
 .../doris/catalog/DynamicPartitionTableTest.java   | 37 ++++++++++++--
 6 files changed, 74 insertions(+), 126 deletions(-)

diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
index b458dd6e9d..ea8fbdb7b9 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
@@ -88,68 +88,9 @@ TO example_repo
 EXCLUDE (example_tbl);
 ```
 
-4. Create a warehouse named hdfs_repo, rely on Baidu hdfs broker "hdfs_broker", the data root directory is: hdfs://hadoop-name-node:54310/path/to/repo/
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH BROKER `hdfs_broker`
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
-    "username" = "user",
-    "password" = "password"
-);
-```
-
-5. Create a repository named s3_repo to link cloud storage directly without going through the broker.
-
-```
-CREATE REPOSITORY `s3_repo`
-WITH S3
-ON LOCATION "s3://s3-repo"
-PROPERTIES
-(
-    "AWS_ENDPOINT" = "http://s3-REGION.amazonaws.com";,
-    "AWS_ACCESS_KEY" = "AWS_ACCESS_KEY",
-    "AWS_SECRET_KEY"="AWS_SECRET_KEY",
-    "AWS_REGION" = "REGION"
-);
-```
-
-6. Create a repository named hdfs_repo to link HDFS directly without going through the broker.
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH hdfs
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
-    "fs.defaultFS"="hdfs://hadoop-name-node:54310",
-    "hadoop.username"="user"
-);
-```
-
-7. Create a repository named minio_repo to link minio storage directly through the s3 protocol.
-
-```
-CREATE REPOSITORY `minio_repo`
-WITH S3
-ON LOCATION "s3://minio_repo"
-PROPERTIES
-(
-    "AWS_ENDPOINT" = "http://minio.com";,
-    "AWS_ACCESS_KEY" = "MINIO_USER",
-    "AWS_SECRET_KEY"="MINIO_PASSWORD",
-    "AWS_REGION" = "REGION",
-    "use_path_style" = "true"
-);
-```
-
 ### Keywords
 
-```text
 BACKUP
-```
 
 ### Best Practice
 
diff --git a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
index f191be3157..4013e4ddce 100644
--- a/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
+++ b/docs/en/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
@@ -139,9 +139,10 @@ PROPERTIES
 );
 ```
 
-<version since="1.2"></version>
 7. Create a repository named minio_repo via temporary security credentials.
 
+<version since="1.2"></version>
+
 ```
 CREATE REPOSITORY `minio_repo`
 WITH S3
@@ -156,6 +157,21 @@ PROPERTIES
 )
 ```
 
+8. Create a repository using Tencent COS
+
+```
+CREATE REPOSITORY `cos_repo`
+WITH S3
+ON LOCATION "s3://backet1/"
+PROPERTIES
+(
+    "AWS_ACCESS_KEY" = "ak",
+    "AWS_SECRET_KEY" = "sk",
+    "AWS_ENDPOINT" = "http://cos.ap-beijing.myqcloud.com";,
+    "AWS_REGION" = "ap-beijing"
+);
+```
+
 ### Keywords
 
     CREATE, REPOSITORY
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
index de766279dd..dc652e935d 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/BACKUP.md
@@ -90,65 +90,6 @@ TO example_repo
 EXCLUDE (example_tbl);
 ```
 
-4. Create a repository named hdfs_repo, relying on the Baidu hdfs broker "hdfs_broker". The data root directory is: hdfs://hadoop-name-node:54310/path/to/repo/
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH BROKER `hdfs_broker`
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
-    "username" = "user",
-    "password" = "password"
-);
-```
-
-5. Create a repository named s3_repo that links to cloud storage directly, without going through a broker.
-
-```
-CREATE REPOSITORY `s3_repo`
-WITH S3
-ON LOCATION "s3://s3-repo"
-PROPERTIES
-(
-    "AWS_ENDPOINT" = "http://s3-REGION.amazonaws.com";,
-    "AWS_ACCESS_KEY" = "AWS_ACCESS_KEY",
-    "AWS_SECRET_KEY"="AWS_SECRET_KEY",
-    "AWS_REGION" = "REGION"
-);
-```
-
-6. Create a repository named hdfs_repo that links to HDFS directly, without going through a broker.
-
-```
-CREATE REPOSITORY `hdfs_repo`
-WITH hdfs
-ON LOCATION "hdfs://hadoop-name-node:54310/path/to/repo/"
-PROPERTIES
-(
-    "fs.defaultFS"="hdfs://hadoop-name-node:54310",
-    "hadoop.username"="user"
-);
-```
-
-7. Create a repository named minio_repo that links to minio directly via the s3 protocol.
-
-```
-CREATE REPOSITORY `minio_repo`
-WITH S3
-ON LOCATION "s3://minio_repo"
-PROPERTIES
-(
-    "AWS_ENDPOINT" = "http://minio.com";,
-    "AWS_ACCESS_KEY" = "MINIO_USER",
-    "AWS_SECRET_KEY"="MINIO_PASSWORD",
-    "AWS_REGION" = "REGION",
-    "use_path_style" = "true"
-);
-```
-
-
-
 ### Keywords
 
 ```text
diff --git a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
index 710e9b46aa..9a5977d996 100644
--- a/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
+++ b/docs/zh-CN/docs/sql-manual/sql-reference/Data-Definition-Statements/Backup-and-Restore/CREATE-REPOSITORY.md
@@ -136,9 +136,11 @@ PROPERTIES
     "use_path_style" = "true"
 );
 ```
-<version since="1.2"></version>
+
 7. Create a repository named minio_repo via temporary security credentials
 
+<version since="1.2"></version>
+
 ```
 CREATE REPOSITORY `minio_repo`
 WITH S3
@@ -153,6 +155,20 @@ PROPERTIES
 )
 ```
 
+8. Create a repository using Tencent Cloud COS
+
+```
+CREATE REPOSITORY `cos_repo`
+WITH S3
+ON LOCATION "s3://backet1/"
+PROPERTIES
+(
+    "AWS_ACCESS_KEY" = "ak",
+    "AWS_SECRET_KEY" = "sk",
+    "AWS_ENDPOINT" = "http://cos.ap-beijing.myqcloud.com";,
+    "AWS_REGION" = "ap-beijing"
+);
+```
 
 ### Keywords
 
diff --git a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
index bda32ce929..f5088db5ed 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/OlapTable.java
@@ -1304,8 +1304,13 @@ public class OlapTable extends Table {
         }
 
         // remove shadow index from copied table
-        List<MaterializedIndex> shadowIndex = 
copied.getPartitions().stream().findFirst()
-                .get().getMaterializedIndices(IndexExtState.SHADOW);
+        // NOTICE that there may be no partitions in the table.
+        List<MaterializedIndex> shadowIndex = Lists.newArrayList();
+        Optional<Partition> firstPartition = 
copied.getPartitions().stream().findFirst();
+        if (firstPartition.isPresent()) {
+            shadowIndex = firstPartition.get().getMaterializedIndices(IndexExtState.SHADOW);
+        }
+
         for (MaterializedIndex deleteIndex : shadowIndex) {
             LOG.debug("copied table delete shadow index : {}", 
deleteIndex.getId());
             
copied.deleteIndexInfo(copied.getIndexNameById(deleteIndex.getId()));
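
The same guard can also be written without the mutable local by mapping over the Optional. A minimal self-contained sketch, assuming a stand-in Partition class that exposes its shadow-index ids as a plain list (not the Doris catalog type):

```
import java.util.Arrays;
import java.util.Collections;
import java.util.List;

public class SelectiveCopySketch {
    // Stand-in for a partition carrying the ids of its shadow indexes.
    static class Partition {
        final List<Long> shadowIndexIds;

        Partition(List<Long> shadowIndexIds) {
            this.shadowIndexIds = shadowIndexIds;
        }
    }

    // Equivalent of the guarded lookup in the patch: the first partition's
    // shadow indexes, or an empty list when the table has no partitions.
    static List<Long> shadowIndexIds(List<Partition> partitions) {
        return partitions.stream()
                .findFirst()
                .map(p -> p.shadowIndexIds)
                .orElse(Collections.emptyList());
    }

    public static void main(String[] args) {
        System.out.println(shadowIndexIds(Collections.emptyList()));                               // []
        System.out.println(shadowIndexIds(Arrays.asList(new Partition(Arrays.asList(10L, 11L))))); // [10, 11]
    }
}
```

Either form yields an empty list for a table with no partitions, so the shadow-index cleanup loop above simply becomes a no-op instead of throwing.
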
diff --git a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
index ef60733e73..3a522ad3a6 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/catalog/DynamicPartitionTableTest.java
@@ -20,6 +20,7 @@ package org.apache.doris.catalog;
 import org.apache.doris.analysis.AlterTableStmt;
 import org.apache.doris.analysis.CreateDbStmt;
 import org.apache.doris.analysis.CreateTableStmt;
+import org.apache.doris.catalog.MaterializedIndex.IndexExtState;
 import org.apache.doris.clone.DynamicPartitionScheduler;
 import org.apache.doris.common.AnalysisException;
 import org.apache.doris.common.Config;
@@ -40,6 +41,7 @@ import org.junit.rules.ExpectedException;
 
 import java.text.SimpleDateFormat;
 import java.util.Calendar;
+import java.util.Collection;
 import java.util.Collections;
 import java.util.Date;
 import java.util.GregorianCalendar;
@@ -1537,10 +1539,37 @@ public class DynamicPartitionTableTest {
                 + ");";
         ExceptionChecker.expectThrowsWithMsg(DdlException.class,
                 "errCode = 2, detailMessage = Invalid \" 
dynamic_partition.reserved_history_periods \""
-                    + " value [2020-01-01,2020-03-01]. "
-                    + "It must be like "
-                    + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while time_unit 
is DAY/WEEK/MONTH "
-                    + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd 
HH:mm:ss],[...,...]\" while time_unit is HOUR.",
+                        + " value [2020-01-01,2020-03-01]. "
+                        + "It must be like "
+                        + "\"[yyyy-MM-dd,yyyy-MM-dd],[...,...]\" while 
time_unit is DAY/WEEK/MONTH "
+                        + "or \"[yyyy-MM-dd HH:mm:ss,yyyy-MM-dd 
HH:mm:ss],[...,...]\" while time_unit is HOUR.",
                 () -> createTable(createOlapTblStmt4));
     }
+
+    @Test
+    public void testNoPartition() throws AnalysisException {
+        String createOlapTblStmt = "CREATE TABLE test.`no_partition` (\n"
+                + "  `k1` datetime NULL COMMENT \"\",\n"
+                + "  `k2` int NULL COMMENT \"\",\n"
+                + "  `k3` smallint NULL COMMENT \"\",\n"
+                + "  `v1` varchar(2048) NULL COMMENT \"\",\n"
+                + "  `v2` datetime NULL COMMENT \"\"\n"
+                + ") ENGINE=OLAP\n"
+                + "DUPLICATE KEY(`k1`, `k2`, `k3`)\n"
+                + "COMMENT \"OLAP\"\n"
+                + "PARTITION BY RANGE (k1)()\n"
+                + "DISTRIBUTED BY HASH(`k1`) BUCKETS 32\n"
+                + "PROPERTIES (\n"
+                + "\"replication_num\" = \"1\"\n"
+                + ");";
+        ExceptionChecker.expectThrowsNoException(() -> createTable(createOlapTblStmt));
+        OlapTable table = (OlapTable) Env.getCurrentInternalCatalog()
+                .getDbOrAnalysisException("default_cluster:test")
+                .getTableOrAnalysisException("no_partition");
+        Collection<Partition> partitions = table.getPartitions();
+        Assert.assertTrue(partitions.isEmpty());
+        OlapTable copiedTable = table.selectiveCopy(Collections.emptyList(), IndexExtState.VISIBLE, true);
+        partitions = copiedTable.getPartitions();
+        Assert.assertTrue(partitions.isEmpty());
+    }
 }

