This is an automated email from the ASF dual-hosted git repository.

github-bot pushed a commit to branch auto-pick-42337-branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/auto-pick-42337-branch-3.0 by this push:
     new 787bda77740 [improve](restore) Allow atomic_restore with different replica num (#42337)
787bda77740 is described below

commit 787bda77740cdf05a2c10c11dc4d4c86fb5b3912
Author: walter <w41te...@gmail.com>
AuthorDate: Mon Oct 28 12:07:38 2024 +0800

    [improve](restore) Allow atomic_restore with different replica num (#42337)
---
 .../java/org/apache/doris/backup/RestoreJob.java   |  24 ++---
 .../test_backup_restore_atomic_reserve_replica.out |  97 +++++++++++++++++
 .../test_backup_restore_atomic.groovy              |   3 +
 .../test_backup_restore_atomic_cancel.groovy       |   4 +-
 ...t_backup_restore_atomic_reserve_replica.groovy} | 120 +++++----------------
 .../test_backup_restore_atomic_with_view.groovy    |   3 +
 6 files changed, 142 insertions(+), 109 deletions(-)
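
For context: this change makes atomic_restore tolerate a replica-count mismatch between the snapshot and the target tables, logging a warning and skipping replica binding instead of failing the restore job. A minimal usage sketch follows, mirroring the RESTORE statement exercised by the new test_backup_restore_atomic_reserve_replica suite; the repository, database, snapshot, and timestamp names are placeholders, not values from this commit:

    -- Sketch only (placeholder names): atomically restore a snapshot taken from
    -- 1-replica tables into a target that keeps 3 replicas per tablet.
    RESTORE SNAPSHOT example_db.example_snapshot
    FROM `example_repo`
    PROPERTIES
    (
        "backup_timestamp" = "2024-10-28-12-07-38",
        "replication_num" = "3",
        "atomic_restore" = "true"
    );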

diff --git a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
index c287ca78038..85bbd453c9a 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/backup/RestoreJob.java
@@ -1102,11 +1102,11 @@ public class RestoreJob extends AbstractJob implements GsonPostProcessable {
                     List<Tablet> localTablets = localIndex.getTablets();
                     List<Tablet> remoteTablets = index.getTablets();
                     if (localTablets.size() != remoteTablets.size()) {
-                        return new Status(ErrCode.COMMON_ERROR, String.format(
-                                "the size of local tablet %s is not equals to the remote %s, "
-                                + "is_atomic_restore=true, remote table=%d, remote index=%d, "
-                                + "local table=%d, local index=%d", localTablets.size(), remoteTablets.size(),
-                                remoteOlapTbl.getId(), index.getId(), localOlapTbl.getId(), localIndexId));
+                        LOG.warn("skip bind replicas because the size of local tablet {} is not equals to "
+                                + "the remote {}, is_atomic_restore=true, remote table={}, remote index={}, "
+                                + "local table={}, local index={}", localTablets.size(), remoteTablets.size(),
+                                remoteOlapTbl.getId(), index.getId(), localOlapTbl.getId(), localIndexId);
+                        continue;
                     }
                     for (int i = 0; i < remoteTablets.size(); i++) {
                         Tablet localTablet = localTablets.get(i);
@@ -1114,13 +1114,13 @@ public class RestoreJob extends AbstractJob implements GsonPostProcessable {
                         List<Replica> localReplicas = localTablet.getReplicas();
                         List<Replica> remoteReplicas = remoteTablet.getReplicas();
                         if (localReplicas.size() != remoteReplicas.size()) {
-                            return new Status(ErrCode.COMMON_ERROR, String.format(
-                                    "the size of local replicas %s is not equals to the remote %s, "
-                                    + "is_atomic_restore=true, remote table=%d, remote index=%d, "
-                                    + "local table=%d, local index=%d, local replicas=%d, remote replicas=%d",
-                                    localTablets.size(), remoteTablets.size(), remoteOlapTbl.getId(),
-                                    index.getId(), localOlapTbl.getId(), localIndexId, localReplicas.size(),
-                                    remoteReplicas.size()));
+                            LOG.warn("skip bind replicas because the size of local replicas {} is not equals to "
+                                    + "the remote {}, is_atomic_restore=true, remote table={}, remote index={}, "
+                                    + "local table={}, local index={}, local tablet={}, remote tablet={}",
+                                    localReplicas.size(), remoteReplicas.size(), remoteOlapTbl.getId(),
+                                    index.getId(), localOlapTbl.getId(), localIndexId, localTablet.getId(),
+                                    remoteTablet.getId());
+                            continue;
                         }
                         for (int j = 0; j < remoteReplicas.size(); j++) {
                             long backendId = localReplicas.get(j).getBackendIdWithoutException();
diff --git a/regression-test/data/backup_restore/test_backup_restore_atomic_reserve_replica.out b/regression-test/data/backup_restore/test_backup_restore_atomic_reserve_replica.out
new file mode 100644
index 00000000000..31a6aa569fc
--- /dev/null
+++ b/regression-test/data/backup_restore/test_backup_restore_atomic_reserve_replica.out
@@ -0,0 +1,97 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
+-- !sql --
+10     10
+20     20
+30     30
+40     40
+50     50
+60     60
+70     70
+80     80
+90     90
+100    100
+
diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy
index 4b87340fb35..0158612aef1 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy
@@ -142,6 +142,7 @@ suite("test_backup_restore_atomic", "backup_restore") {
 
     syncer.waitAllRestoreFinish(dbName)
 
+    sql "sync"
     for (def tableName in tables) {
         qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id"
     }
@@ -161,6 +162,7 @@ suite("test_backup_restore_atomic", "backup_restore") {
 
     syncer.waitAllRestoreFinish(dbName1)
 
+    sql "sync"
     qt_sql "SELECT * FROM ${dbName1}.${tableNamePrefix}_3 ORDER BY id"
 
     // add partition and insert some data.
@@ -196,6 +198,7 @@ suite("test_backup_restore_atomic", "backup_restore") {
 
     syncer.waitAllRestoreFinish(dbName1)
 
+    sql "sync"
     qt_sql "SELECT * FROM ${dbName1}.${tableNamePrefix}_3 ORDER BY id"
 
     for (def tableName in tables) {
diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy
index 3487c93b0d6..3f4f91d5a1d 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_cancel.groovy
@@ -65,10 +65,12 @@ suite("test_backup_restore_atomic_cancel") {
         values.add("(${i}, ${i})")
     }
     sql "INSERT INTO ${dbName}.${tableName} VALUES ${values.join(",")}"
+    sql "sync"
     def result = sql "SELECT * FROM ${dbName}.${tableName}"
     assertEquals(result.size(), values.size());
 
     sql "INSERT INTO ${dbName}.${tableName1} VALUES ${values.join(",")}"
+    sql "sync"
     result = sql "SELECT * FROM ${dbName}.${tableName1}"
     assertEquals(result.size(), values.size());
 
@@ -111,7 +113,7 @@ suite("test_backup_restore_atomic_cancel") {
     logger.info("show restore result: ${restore_result}")
     assertTrue(restore_result.last().State == "CANCELLED")
 
-
+    sql "sync"
     // Do not affect any tables.
     result = sql "SELECT * FROM ${dbName}.${tableName}"
     assertEquals(result.size(), values.size() + 1);
diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_reserve_replica.groovy
similarity index 50%
copy from regression-test/suites/backup_restore/test_backup_restore_atomic.groovy
copy to regression-test/suites/backup_restore/test_backup_restore_atomic_reserve_replica.groovy
index 4b87340fb35..ab8ad129273 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_atomic.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_reserve_replica.groovy
@@ -15,10 +15,9 @@
 // specific language governing permissions and limitations
 // under the License.
 
-suite("test_backup_restore_atomic", "backup_restore") {
-    String suiteName = "test_backup_restore_atomic"
-    String dbName = "${suiteName}_db_1"
-    String dbName1 = "${suiteName}_db_2"
+suite("test_backup_restore_atomic_reserve_replica", "backup_restore") {
+    String suiteName = "test_backup_restore_atomic_reserve_replica"
+    String dbName = "${suiteName}_db"
     String repoName = "repo_" + UUID.randomUUID().toString().replace("-", "")
     String snapshotName = "${suiteName}_snapshot"
     String tableNamePrefix = "${suiteName}_tables"
@@ -26,12 +25,10 @@ suite("test_backup_restore_atomic", "backup_restore") {
     def syncer = getSyncer()
     syncer.createS3Repository(repoName)
     sql "CREATE DATABASE IF NOT EXISTS ${dbName}"
-    sql "CREATE DATABASE IF NOT EXISTS ${dbName1}"
 
-    // 1. restore to not exists table_0
-    // 2. restore partial data to table_1
-    // 3. restore less data to table_2
-    // 4. restore incremental data to table_3
+    // 1. atomic restore with different replication num, base 1, target 3
+    // 2. atomic restore with reserve_replica = true, base 3, target 1
+
     int numTables = 4;
     List<String> tables = []
     for (int i = 0; i < numTables; ++i) {
@@ -62,35 +59,6 @@ suite("test_backup_restore_atomic", "backup_restore") {
             """
     }
 
-    // 5. the len of table name equals to the config table_name_length_limit
-    def maxLabelLen = getFeConfig("table_name_length_limit").toInteger()
-    def maxTableName = "".padRight(maxLabelLen, "x")
-    logger.info("config table_name_length_limit = ${maxLabelLen}, table name = ${maxTableName}")
-    sql "DROP TABLE IF EXISTS ${dbName}.${maxTableName}"
-    sql """
-        CREATE TABLE ${dbName}.${maxTableName} (
-            `id` LARGEINT NOT NULL,
-            `count` LARGEINT SUM DEFAULT "0"
-        )
-        AGGREGATE KEY(`id`)
-        PARTITION BY RANGE(`id`)
-        (
-            PARTITION p1 VALUES LESS THAN ("10"),
-            PARTITION p2 VALUES LESS THAN ("20"),
-            PARTITION p3 VALUES LESS THAN ("30"),
-            PARTITION p4 VALUES LESS THAN ("40"),
-            PARTITION p5 VALUES LESS THAN ("50"),
-            PARTITION p6 VALUES LESS THAN ("60"),
-            PARTITION p7 VALUES LESS THAN ("120")
-        )
-        DISTRIBUTED BY HASH(`id`) BUCKETS 2
-        PROPERTIES
-        (
-            "replication_num" = "1"
-        )
-        """
-    tables.add(maxTableName)
-
     int numRows = 10;
     List<String> values = []
     for (int j = 1; j <= numRows; ++j) {
@@ -101,91 +69,48 @@ suite("test_backup_restore_atomic", "backup_restore") {
     sql "INSERT INTO ${dbName}.${tableNamePrefix}_1 VALUES ${values.join(",")}"
     sql "INSERT INTO ${dbName}.${tableNamePrefix}_2 VALUES ${values.join(",")}"
     sql "INSERT INTO ${dbName}.${tableNamePrefix}_3 VALUES ${values.join(",")}"
-    sql "INSERT INTO ${dbName}.${maxTableName} VALUES ${values.join(",")}"
 
-    // the other partitions of table_1 will be drop
     sql """
         BACKUP SNAPSHOT ${dbName}.${snapshotName}
         TO `${repoName}`
-        ON (
-            ${tableNamePrefix}_0,
-            ${tableNamePrefix}_1 PARTITION (p1, p2, p3),
-            ${tableNamePrefix}_2,
-            ${tableNamePrefix}_3,
-            ${maxTableName}
-        )
     """
 
     syncer.waitSnapshotFinish(dbName)
 
+    for (def tableName in tables) {
+        sql "TRUNCATE TABLE ${dbName}.${tableName}"
+    }
+
     def snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName)
     assertTrue(snapshot != null)
 
-    // drop table_0
-    sql "DROP TABLE ${dbName}.${tableNamePrefix}_0 FORCE"
-
-    // insert external data to table_2
-    sql "INSERT INTO ${dbName}.${tableNamePrefix}_2 VALUES ${values.join(",")}"
-
-    sql "TRUNCATE TABLE ${dbName}.${tableNamePrefix}_3"
-
+    // restore with replication num = 3
     sql """
         RESTORE SNAPSHOT ${dbName}.${snapshotName}
         FROM `${repoName}`
         PROPERTIES
         (
             "backup_timestamp" = "${snapshot}",
-            "reserve_replica" = "true",
+            "replication_num" = "3",
             "atomic_restore" = "true"
         )
     """
 
     syncer.waitAllRestoreFinish(dbName)
 
+    sql "sync"
     for (def tableName in tables) {
         qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id"
     }
 
-    // restore table_3 to new db
-    sql """
-        RESTORE SNAPSHOT ${dbName1}.${snapshotName}
-        FROM `${repoName}`
-        ON (${tableNamePrefix}_3)
-        PROPERTIES
-        (
-            "backup_timestamp" = "${snapshot}",
-            "reserve_replica" = "true",
-            "atomic_restore" = "true"
-        )
-    """
-
-    syncer.waitAllRestoreFinish(dbName1)
-
-    qt_sql "SELECT * FROM ${dbName1}.${tableNamePrefix}_3 ORDER BY id"
-
-    // add partition and insert some data.
-    sql "ALTER TABLE ${dbName}.${tableNamePrefix}_3 ADD PARTITION p8 VALUES 
LESS THAN MAXVALUE"
-    sql "INSERT INTO ${dbName}.${tableNamePrefix}_3 VALUES ${values.join(",")}"
-    sql "INSERT INTO ${dbName}.${tableNamePrefix}_3 VALUES (200, 200)"
-
-    // backup again
-    snapshotName = "${snapshotName}_1"
-    sql """
-        BACKUP SNAPSHOT ${dbName}.${snapshotName}
-        TO `${repoName}`
-        ON (${tableNamePrefix}_3)
-    """
-
-    syncer.waitSnapshotFinish(dbName)
-
-    snapshot = syncer.getSnapshotTimestamp(repoName, snapshotName)
-    assertTrue(snapshot != null)
+    // restore with reserve_replica = true
+    for (def tableName in tables) {
+        sql "TRUNCATE TABLE ${dbName}.${tableName}"
+    }
 
-    // restore with incremental data
     sql """
-        RESTORE SNAPSHOT ${dbName1}.${snapshotName}
+        RESTORE SNAPSHOT ${dbName}.${snapshotName}
         FROM `${repoName}`
-        ON (${tableNamePrefix}_3)
         PROPERTIES
         (
             "backup_timestamp" = "${snapshot}",
@@ -194,16 +119,19 @@ suite("test_backup_restore_atomic", "backup_restore") {
         )
     """
 
-    syncer.waitAllRestoreFinish(dbName1)
+    syncer.waitAllRestoreFinish(dbName)
 
-    qt_sql "SELECT * FROM ${dbName1}.${tableNamePrefix}_3 ORDER BY id"
+    sql "sync"
+    for (def tableName in tables) {
+        qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id"
+    }
 
     for (def tableName in tables) {
         sql "DROP TABLE ${dbName}.${tableName} FORCE"
     }
     sql "DROP DATABASE ${dbName} FORCE"
-    sql "DROP DATABASE ${dbName1} FORCE"
     sql "DROP REPOSITORY `${repoName}`"
 }
 
 
+
diff --git a/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy
index 9d090281364..8746018e3e4 100644
--- a/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy
+++ b/regression-test/suites/backup_restore/test_backup_restore_atomic_with_view.groovy
@@ -55,6 +55,7 @@ suite("test_backup_restore_atomic_with_view", "backup_restore") {
         SELECT * FROM ${dbName}.${tableName} WHERE count > 5
         """
 
+    sql "sync"
     qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id ASC"
     qt_sql "SELECT * FROM ${dbName}.${viewName} ORDER BY id ASC"
 
@@ -85,6 +86,7 @@ suite("test_backup_restore_atomic_with_view", "backup_restore") {
 
     syncer.waitAllRestoreFinish(dbName1)
 
+    sql "sync"
     qt_sql "SELECT * FROM ${dbName1}.${tableName} ORDER BY id ASC"
     qt_sql "SELECT * FROM ${dbName1}.${viewName} ORDER BY id ASC"
     def show_view_result = sql_return_maparray "SHOW VIEW FROM ${tableName} FROM ${dbName1}"
@@ -115,6 +117,7 @@ suite("test_backup_restore_atomic_with_view", "backup_restore") {
     // View could read the incremental data.
     sql "INSERT INTO ${dbName}.${tableName} VALUES (11, 11)"
 
+    sql "sync"
     qt_sql "SELECT * FROM ${dbName}.${tableName} ORDER BY id ASC"
     qt_sql "SELECT * FROM ${dbName}.${viewName} ORDER BY id ASC"
 

