This is an automated email from the ASF dual-hosted git repository.

jianliangqi pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/master by this push:
     new 5b5a704286d [test](inverted index)Add cases for inverted index format v2 (#38132)
5b5a704286d is described below

commit 5b5a704286db20474a5531d4d087e4376e3cf3ec
Author: qiye <jianliang5...@gmail.com>
AuthorDate: Mon Jul 22 21:12:43 2024 +0800

    [test](inverted index)Add cases for inverted index format v2 (#38132)
    
    ## Proposed changes
    
    1. Throw an exception when trying to change the inverted index storage format (see the sketch after this list)
    2. Add test cases for index format v2
        - test_add_build_index_with_format_v2
        - test_create_table_with_format_v2
        - test_cumulative_compaction_with_format_v2
        - test_drop_column_with_format_v2
        - test_drop_index_with_format_v2
        - test_index_change_format
        - test_mor_table_with_format_v2
        - test_mow_table_with_format_v2
        - test_recover_with_format_v2
        - test_rename_column_with_format_v2
        - test_single_replica_compaction_with_format_v2
    3. The test case `test_rename_column_with_format_v2` also covers #38079
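
    As a quick illustration of change 1, a minimal sketch modeled on the
    test_index_change_format suite included below (the table name example_tbl
    is hypothetical; the message is the one thrown by the new check in
    ModifyTablePropertiesClause.java):

        // Changing the inverted index storage format after table creation is rejected.
        try {
            sql "ALTER TABLE example_tbl SET ('inverted_index_storage_format' = 'V1')"
        } catch (Exception e) {
            assertTrue(e.getMessage().contains('Property inverted_index_storage_format is not allowed to change'))
        }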
---
 .../analysis/ModifyTablePropertiesClause.java      |   4 +
 .../test_add_build_index_with_format_v2.out        |   6 +
 .../test_create_table_with_format_v2.out           |   6 +
 .../test_cumulative_compaction_with_format_v2.out  |   9 +
 .../test_drop_column_index_with_format_v2.out      |   6 +
 .../test_drop_column_with_format_v2.out            |   6 +
 .../test_drop_index_with_format_v2.out             |   6 +
 .../test_mor_table_with_format_v2.out              |   7 +
 .../test_mow_table_with_format_v2.out              |   7 +
 .../test_rename_column_with_format_v2.out          |  11 +
 ...st_single_replica_compaction_with_format_v2.out |   9 +
 .../test_add_build_index_with_format_v2.groovy     | 169 ++++++++++++
 .../test_create_table_with_format_v2.groovy        |  72 ++++++
 ...est_cumulative_compaction_with_format_v2.groovy | 277 ++++++++++++++++++++
 .../test_drop_column_with_format_v2.groovy         | 155 +++++++++++
 .../test_drop_index_with_format_v2.groovy          | 128 ++++++++++
 .../test_index_change_format.groovy                |  51 ++++
 .../test_mor_table_with_format_v2.groovy           | 283 +++++++++++++++++++++
 .../test_mow_table_with_format_v2.groovy           | 277 ++++++++++++++++++++
 .../test_recover_with_format_v2.groovy             |  92 +++++++
 .../test_rename_column_with_format_v2.groovy       | 123 +++++++++
 ...single_replica_compaction_with_format_v2.groovy | 281 ++++++++++++++++++++
 22 files changed, 1985 insertions(+)

diff --git a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
index 85b00394326..c7ea5c2ecca 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/ModifyTablePropertiesClause.java
@@ -260,6 +260,10 @@ public class ModifyTablePropertiesClause extends AlterTableClause {
             }
             this.needTableStable = false;
             this.opType = AlterOpType.MODIFY_TABLE_PROPERTY_SYNC;
+        } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_INVERTED_INDEX_STORAGE_FORMAT)) {
+            throw new AnalysisException(
+                "Property "
+                + PropertyAnalyzer.PROPERTIES_INVERTED_INDEX_STORAGE_FORMAT + " is not allowed to change");
         } else if (properties.containsKey(PropertyAnalyzer.PROPERTIES_ENABLE_SINGLE_REPLICA_COMPACTION)) {
             if (!properties.get(PropertyAnalyzer.PROPERTIES_ENABLE_SINGLE_REPLICA_COMPACTION).equalsIgnoreCase("true")
                     && !properties.get(PropertyAnalyzer
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out
new file mode 100644
index 00000000000..d00ff307840
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.out
@@ -0,0 +1,9 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out
new file mode 100644
index 00000000000..419f6ee2c26
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.out
@@ -0,0 +1,6 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out
new file mode 100644
index 00000000000..d5747528271
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+4      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Shenzhen        10      1       \N      \N      \N      \N      2020-01-05T00:00        1       34      20
+
+-- !select_default2 --
+4      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Shenzhen        10      1       \N      \N      \N      \N      2020-01-05T00:00        1       34      20
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out
new file mode 100644
index 00000000000..8958d7f42f6
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.out
@@ -0,0 +1,7 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out
new file mode 100644
index 00000000000..c1c19e9ab3c
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.out
@@ -0,0 +1,11 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
+-- !sql --
+1      andy    100
+2      andy    100
+3      andy    100
+
diff --git a/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out b/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out
new file mode 100644
index 00000000000..d00ff307840
--- /dev/null
+++ b/regression-test/data/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.out
@@ -0,0 +1,9 @@
+-- This file is automatically generated. You should know what you did if you want to edit this
+-- !select_default --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
+-- !select_default2 --
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-01T00:00        2020-01-01T00:00        2017-10-01T11:11:11.170 2017-10-01T11:11:11.110111      2020-01-01T00:00        1       30      20
+1      2017-10-01      2017-10-01      2017-10-01T11:11:11.110 2017-10-01T11:11:11.110111      Beijing 10      1       2020-01-02T00:00        2020-01-02T00:00        2017-10-01T11:11:11.160 2017-10-01T11:11:11.100111      2020-01-02T00:00        1       31      19
+
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy
new file mode 100644
index 00000000000..430c3a67e0a
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_add_build_index_with_format_v2.groovy
@@ -0,0 +1,169 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_add_build_index_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_add_build_index_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", 
String.format("http://%s:%s/api/calc_crc?tablet_id=%s";, ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait for the table state to return to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW BUILD INDEX WHERE TableName = "${table_name}";"""
+            def expected_finished_num = alter_res.size();
+            def finished_num = 0;
+            for (int i = 0; i < expected_finished_num; i++) {
+                logger.info(table_name + " build index job state: " + alter_res[i][7] + i)
+                if (alter_res[i][7] == "FINISHED") {
+                    ++finished_num;
+                }
+            }
+            if (finished_num == expected_finished_num) {
+                logger.info(table_name + " all build index jobs finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_build_index_on_partition_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    // add index
+    sql """
+        ALTER TABLE ${tableName}
+        ADD INDEX idx_name (name) using inverted;
+    """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+    
+    sql """
+        ALTER TABLE ${tableName}
+        ADD INDEX idx_score (score) using inverted;
+    """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    // show index after add index
+    def show_result = sql_return_maparray "show index from ${tableName}"
+    logger.info("show index from " + tableName + " result: " + show_result)
+    assertEquals(show_result[0].Key_name, "idx_name")
+    assertEquals(show_result[1].Key_name, "idx_score")
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    // cloud mode performs a direct schema change, while local mode uses light schema change,
+    // so file_count is 12 in cloud mode and 6 in local mode
+    if (isCloudMode()) {
+        assertEquals("12", parseJson(out.trim()).file_count)
+        qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+        return
+    } else {
+        assertEquals("6", parseJson(out.trim()).file_count)
+    }
+
+    // build index 
+    sql """
+        BUILD INDEX idx_name ON ${tableName};
+    """
+    wait_for_build_index_on_partition_finish(tableName, timeout)
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // build index 
+    sql """
+        BUILD INDEX idx_score ON ${tableName};
+    """
+    wait_for_build_index_on_partition_finish(tableName, timeout)
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+}
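
A note on the file_count expectations in the suite above, as a minimal sketch
(the per-segment layout is an assumption inferred from the expected values of
6 and 12, not something the test verifies directly): with storage format V2
each segment appears to contribute one data file, plus one shared index file
once any inverted index has been built.

    // Hypothetical arithmetic behind the "6" and "12" assertions above.
    int segments = 6                          // one segment per INSERT
    int filesBeforeBuildIndex = segments      // data files only: 6
    int filesAfterBuildIndex = segments * 2   // data file + shared index file: 12
    assert filesBeforeBuildIndex == 6 && filesAfterBuildIndex == 12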
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy
new file mode 100644
index 00000000000..dd87a214e2d
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_create_table_with_format_v2.groovy
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_create_table_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_create_table_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", 
String.format("http://%s:%s/api/calc_crc?tablet_id=%s";, ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy
new file mode 100644
index 00000000000..f8ae36175a5
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_cumulative_compaction_with_format_v2.groovy
@@ -0,0 +1,277 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_cumulative_compaction_with_format_v2", "inverted_index_format_v2") 
{
+    def tableName = "test_cumulative_compaction_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to return to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", 
String.format("http://%s:%s/api/calc_crc?tablet_id=%s";, ip, port, tablet))
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", 
err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        //BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + 
err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "用户id",
+                `date` DATE NOT NULL COMMENT "数据灌入日期时间",
+                `datev2` DATEV2 NOT NULL COMMENT "数据灌入日期时间",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "数据灌入日期时间",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "数据灌入日期时间",
+                `city` VARCHAR(20) COMMENT "用户所在城市",
+                `age` SMALLINT COMMENT "用户年龄",
+                `sex` TINYINT COMMENT "用户性别",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" 
COMMENT "用户最后一次访问时间",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" 
COMMENT "用户最后一次更新时间",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 
00:00:00.111" COMMENT "用户最后一次访问时间",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" 
COMMENT "用户最后一次更新时间",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT 
"1970-01-01 00:00:00" COMMENT "用户最后一次访问时间",
+                `cost` BIGINT DEFAULT "0" COMMENT "用户总消费",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "用户最大停留时间",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "用户最小停留时间",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            DUPLICATE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, 
`datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = 
"true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + 
o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            assertEquals("9", parseJson(o.trim()).rowset_count)
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://";)
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", 
err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://";)
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + 
out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + 
segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + 
o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            // after compaction, there are 2 rowsets.
+            assertEquals("2", parseJson(o.trim()).rowset_count)
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 testing environment, there are no expected operations such as scaling down BE (backend) services.
+        // If tablets or dedup_tablets is empty, an exception is thrown and the case fails.
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        if (replicaNum != 1 && replicaNum != 3)
+        {
+            assert(false);
+        }
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+    } finally {
+    }
+}
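
A minimal sketch of the compaction invariant the suite above asserts (this
reading of the version numbers is an assumption inferred from the expected
values, not something the test states): before compaction the tablet holds
nine rowsets, the initial empty rowset plus one per INSERT, and cumulative
compaction collapses the loaded rowsets into a single output rowset, leaving two.

    // Hypothetical arithmetic behind the "9" and "2" assertions above.
    int loads = 8                    // eight INSERT statements
    int rowsetsBefore = 1 + loads    // initial rowset + one rowset per load = 9
    int rowsetsAfter = 2             // base rowset + compacted cumulative rowset
    assert rowsetsBefore == 9 && rowsetsAfter == 2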
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy
new file mode 100644
index 00000000000..24ac5c6ee6f
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_column_with_format_v2.groovy
@@ -0,0 +1,155 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_drop_column_with_format_v2", "inverted_index_format_v2"){
+    // cloud mode uses light schema change, so the tablet meta is not updated after ALTER TABLE
+    // and we can't get the latest tablet meta
+    if (isCloudMode()) {
+        return
+    }
+    def tableName = "test_drop_column_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", 
String.format("http://%s:%s/api/calc_crc?tablet_id=%s";, ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait for the table state to return to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def wait_for_build_index_on_partition_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW BUILD INDEX WHERE TableName = "${table_name}";"""
+            def expected_finished_num = alter_res.size();
+            def finished_num = 0;
+            for (int i = 0; i < expected_finished_num; i++) {
+                logger.info(table_name + " build index job state: " + alter_res[i][7] + i)
+                if (alter_res[i][7] == "FINISHED") {
+                    ++finished_num;
+                }
+            }
+            if (finished_num == expected_finished_num) {
+                logger.info(table_name + " all build index jobs finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_build_index_on_partition_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true",
+            "light_schema_change" = "false"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // drop column
+    sql """ ALTER TABLE ${tableName} DROP COLUMN score; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    // select to sync rowset meta in cloud mode
+    sql """ select * from ${tableName} limit 1; """
+
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    tablet_id = tablets[0].TabletId
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    sql """ ALTER TABLE ${tableName} DROP COLUMN name; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    // select to sync rowset meta in cloud mode
+    sql """ select * from ${tableName} limit 1; """
+    
+    tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    tablet_id = tablets[0].TabletId
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("6", parseJson(out.trim()).file_count)
+}
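
Following the same assumed V2 layout sketched after
test_add_build_index_with_format_v2 above, a minimal sketch of why the suite
above expects file_count to stay at 12 after dropping `score` but to fall to 6
once `name` is gone as well (an interpretation inferred from the expected
values): a segment appears to keep its shared index file while at least one
inverted index remains, so only dropping the last indexed column removes it.

    // Hypothetical arithmetic behind the "12" and "6" assertions above.
    int segments = 6
    int filesWhileAnIndexRemains = segments * 2   // data + shared index file: 12
    int filesWithNoIndexLeft = segments           // data files only: 6
    assert filesWhileAnIndexRemains == 12 && filesWithNoIndexLeft == 6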
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy
new file mode 100644
index 00000000000..c2ce451e857
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_drop_index_with_format_v2.groovy
@@ -0,0 +1,128 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_drop_index_with_format_v2", "inverted_index_format_v2"){
+    // cloud mode uses light schema change, so the tablet meta is not updated after ALTER TABLE
+    // and we can't get the latest tablet meta
+    if (isCloudMode()) {
+        return
+    }
+    def tableName = "test_drop_index_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", 
String.format("http://%s:%s/api/calc_crc?tablet_id=%s";, ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait for the table state to return to normal
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, 
name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // drop index 
+    sql """ DROP INDEX index_name on ${tableName}; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // drop index 
+    sql """ DROP INDEX index_score on ${tableName}; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    // select to sync rowset meta in cloud mode
+    sql """ select * from ${tableName} limit 1; """
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + 
", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("6", parseJson(out.trim()).file_count)
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy
new file mode 100644
index 00000000000..8a5179b0010
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_index_change_format.groovy
@@ -0,0 +1,51 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_index_change_format", "inverted_index_format_v2"){
+    def createAndAlterTable = { tableName, initialFormat, newFormat -> 
+        sql "DROP TABLE IF EXISTS ${tableName}"
+        
+        sql """
+            CREATE TABLE ${tableName} (
+                `id` int(11) NULL,
+                `name` varchar(255) NULL,
+                `score` int(11) NULL,
+                index index_name (name) using inverted,
+                index index_score (score) using inverted
+            ) ENGINE=OLAP
+            DUPLICATE KEY(`id`)
+            COMMENT 'OLAP'
+            DISTRIBUTED BY HASH(`id`) BUCKETS 1
+            PROPERTIES (
+                "replication_allocation" = "tag.location.default: 1",
+                "inverted_index_storage_format" = "${initialFormat}",
+                "disable_auto_compaction" = "true"
+            );
+        """
+        try {
+            sql "ALTER TABLE ${tableName} SET ('inverted_index_storage_format' 
= '${newFormat}')"
+        } catch (Exception e) {
+            log.info(e.getMessage())
+            assertTrue(e.getMessage().contains('Property inverted_index_storage_format is not allowed to change'))
+        }
+    }
+
+    createAndAlterTable("test_index_change_format_v1", "V1", "V2")
+    createAndAlterTable("test_index_change_format_v2", "V2", "V1")
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy
new file mode 100644
index 00000000000..700cb6abc32
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_mor_table_with_format_v2.groovy
@@ -0,0 +1,283 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_mor_table_with_format_v2", "inverted_index_format_v2") {
+    def tableName = "test_mor_table_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to change back to NORMAL
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
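+        // each rowsets entry is assumed to look like "[start-end] <num_segments> ...", so the second whitespace-separated field is the segment count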
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        //BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data load datetime",
+                `datev2` DATEV2 NOT NULL COMMENT "data load datetime",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data load datetime",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data load datetime",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "enable_unique_key_merge_on_write" = "false" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        sql """ DELETE FROM ${tableName} WHERE user_id = 3 """
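+        // the DELETE adds one more version on top of the 8 inserts, so this MoR table is expected to end at version 10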
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 'Shenzhen' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("10", parseJson(o.trim()).end_version)
+            assertEquals("10", parseJson(o.trim()).rowset_count)
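+            // index format v2 merges all inverted indexes of a segment into a single index file, so each segment is expected to contribute two files (data + index)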
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=full")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("10", parseJson(o.trim()).end_version)
+            // after compaction, there is 1 rowset in local mode and 2 rowsets in cloud mode.
+            if (isCloudMode()) {
+                assertEquals("2", parseJson(o.trim()).rowset_count)
+            } else {
+                assertEquals("1", parseJson(o.trim()).rowset_count)
+            }
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 testing environment, no operations such as scaling down BE (backend) services are expected.
+        // If tablets or dedup_tablets is empty, an exception is thrown and the case fails.
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        if (replicaNum != 1 && replicaNum != 3) {
+            assert(false)
+        }
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 'Shenzhen' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+    } finally {
+    }
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy
new file mode 100644
index 00000000000..e38f7bfe5e0
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_mow_table_with_format_v2.groovy
@@ -0,0 +1,277 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_mow_table_with_format_v2", "inverted_index_format_v2") {
+    def tableName = "test_mow_table_with_format_v2"
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to change back to NORMAL
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        //BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String[][] backends = sql """ show backends; """
+        assertTrue(backends.size() > 0)
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data load datetime",
+                `datev2` DATEV2 NOT NULL COMMENT "data load datetime",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data load datetime",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data load datetime",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            UNIQUE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "1", "disable_auto_compaction" = "true", "enable_unique_key_merge_on_write" = "true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            assertEquals("9", parseJson(o.trim()).rowset_count)
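+            // with one merged inverted index file per segment under format v2, two files per segment (data + index) are expected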
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            // after compaction, there are 2 rowsets.
+            assertEquals("2", parseJson(o.trim()).rowset_count)
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 testing environment, no operations such as scaling down BE (backend) services are expected.
+        // If tablets or dedup_tablets is empty, an exception is thrown and the case fails.
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        if (replicaNum != 1 && replicaNum != 3) {
+            assert(false)
+        }
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+    } finally {
+    }
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy
new file mode 100644
index 00000000000..ee109817c4c
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_recover_with_format_v2.groovy
@@ -0,0 +1,92 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_recover_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_recover_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def check_index_file = { ->
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+        for (int i = 0; i < tablets.size(); i++) {
+            String tablet_id = tablets[i].TabletId
+            String backend_id = tablets[i].BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + ", err=" + err)
+            assertTrue(code == 0)
+            assertTrue(out.contains("crc_value"))
+            assertTrue(out.contains("used_time_ms"))
+            assertEquals("0", parseJson(out.trim()).start_version)
+            assertEquals("3", parseJson(out.trim()).end_version)
+            assertEquals("3", parseJson(out.trim()).rowset_count)
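+            // assuming each tablet holds two single-segment data rowsets (plus the empty initial rowset), format v2 yields two files per segment (data + merged index), hence 4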
+            assertEquals("4", parseJson(out.trim()).file_count)
+        }
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted,
+            index index_score (score) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        PARTITION BY LIST(`id`)
+        (
+            PARTITION p1 VALUES IN ("1"),
+            PARTITION p2 VALUES IN ("2"),
+            PARTITION p3 VALUES IN ("3")
+        )
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    check_index_file()
+
+    // drop table and recover
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    sql "RECOVER TABLE ${tableName}"
+    check_index_file()
+
+    // drop partition and recover
+    sql "ALTER TABLE ${tableName} DROP PARTITION p1"
+    sql "RECOVER PARTITION p1 from ${tableName}"
+    check_index_file()
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy
new file mode 100644
index 00000000000..663eb8434a3
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_rename_column_with_format_v2.groovy
@@ -0,0 +1,123 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_rename_column_with_format_v2", "inverted_index_format_v2"){
+    def tableName = "test_rename_column_with_format_v2"
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
+    }
+    def backendId_to_backendIP = [:]
+    def backendId_to_backendHttpPort = [:]
+    getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(3000) // wait for the table state to change back to NORMAL
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    sql "DROP TABLE IF EXISTS ${tableName}"
+    
+    sql """
+           CREATE TABLE ${tableName} (
+            `id` int(11) NULL,
+            `name` varchar(255) NULL,
+            `score` int(11) NULL,
+            index index_name (name) using inverted
+        ) ENGINE=OLAP
+        DUPLICATE KEY(`id`)
+        COMMENT 'OLAP'
+        DISTRIBUTED BY HASH(`id`) BUCKETS 1
+        PROPERTIES (
+            "replication_allocation" = "tag.location.default: 1",
+            "inverted_index_storage_format" = "V2",
+            "disable_auto_compaction" = "true"
+        );
+    """
+    sql """ INSERT INTO ${tableName} VALUES (1, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (1, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (2, "bason", 99); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "andy", 100); """
+    sql """ INSERT INTO ${tableName} VALUES (3, "bason", 99); """
+
+    qt_sql "SELECT * FROM $tableName WHERE name match 'andy' order by id, name, score;"
+
+    def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+    String tablet_id = tablets[0].TabletId
+    String backend_id = tablets[0].BackendId
+    String ip = backendId_to_backendIP.get(backend_id)
+    String port = backendId_to_backendHttpPort.get(backend_id)
+    def (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + ", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
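+    // six single-segment data rowsets, two files per segment under format v2 (data + merged index), hence 12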
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // rename column
+    sql """ ALTER TABLE ${tableName} RENAME COLUMN name name_new; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    qt_sql "SELECT * FROM $tableName WHERE name_new match 'andy' order by id, name_new, score;"
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + ", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    assertEquals("12", parseJson(out.trim()).file_count)
+
+    // drop column
+    sql """ ALTER TABLE ${tableName} DROP COLUMN name_new; """
+    wait_for_latest_op_on_table_finish(tableName, timeout)
+
+    (code, out, err) = calc_file_crc_on_tablet(ip, port, tablet_id)
+    logger.info("Run calc_file_crc_on_tablet: code=" + code + ", out=" + out + ", err=" + err)
+    assertTrue(code == 0)
+    assertTrue(out.contains("crc_value"))
+    assertTrue(out.contains("used_time_ms"))
+    assertEquals("0", parseJson(out.trim()).start_version)
+    assertEquals("7", parseJson(out.trim()).end_version)
+    assertEquals("7", parseJson(out.trim()).rowset_count)
+    // Ideally 6 files would be expected after dropping the indexed column.
+    // Since changing light_schema_change from true to false is not supported, a direct schema change cannot be performed.
+    // As a result, the rowset schema does not remove the index info, so the file count does not decrease.
+    assertEquals("12", parseJson(out.trim()).file_count)
+}
diff --git a/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy
new file mode 100644
index 00000000000..453fcd91226
--- /dev/null
+++ b/regression-test/suites/inverted_index_p0/index_format_v2/test_single_replica_compaction_with_format_v2.groovy
@@ -0,0 +1,281 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite("test_single_replica_compaction_with_format_v2", "inverted_index_format_v2") {
+    def tableName = "test_single_replica_compaction_with_format_v2"
+
+    def backends = sql_return_maparray('show backends')
+    // if there are fewer than 2 backends, skip this case
+    if (backends.size() < 2) {
+        return
+    }
+
+    def timeout = 60000
+    def delta_time = 1000
+    def alter_res = "null"
+    def useTime = 0
+
+    def wait_for_latest_op_on_table_finish = { table_name, OpTimeout ->
+        for(int t = delta_time; t <= OpTimeout; t += delta_time){
+            alter_res = sql """SHOW ALTER TABLE COLUMN WHERE TableName = "${table_name}" ORDER BY CreateTime DESC LIMIT 1;"""
+            alter_res = alter_res.toString()
+            if(alter_res.contains("FINISHED")) {
+                sleep(10000) // wait for the table state to change back to NORMAL
+                logger.info(table_name + " latest alter job finished, detail: " + alter_res)
+                break
+            }
+            useTime = t
+            sleep(delta_time)
+        }
+        assertTrue(useTime <= OpTimeout, "wait_for_latest_op_on_table_finish timeout")
+    }
+
+    def calc_file_crc_on_tablet = { ip, port, tablet ->
+        return curl("GET", String.format("http://%s:%s/api/calc_crc?tablet_id=%s", ip, port, tablet))
+    }
+
+    def calc_segment_count = { tablet -> 
+        int segment_count = 0
+        String tablet_id = tablet.TabletId
+        StringBuilder sb = new StringBuilder();
+        sb.append("curl -X GET ")
+        sb.append(tablet.CompactionStatus)
+        String command = sb.toString()
+        // wait for cleaning stale_rowsets
+        def process = command.execute()
+        def code = process.waitFor()
+        def err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        def out = process.getText()
+        logger.info("Show tablets status: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def tabletJson = parseJson(out.trim())
+        assert tabletJson.rowsets instanceof List
+        for (String rowset in (List<String>) tabletJson.rowsets) {
+            segment_count += Integer.parseInt(rowset.split(" ")[1])
+        }
+        return segment_count
+    }
+
+    try {
+        //BackendId,Cluster,IP,HeartbeatPort,BePort,HttpPort,BrpcPort,LastStartTime,LastHeartbeat,Alive,SystemDecommissioned,ClusterDecommissioned,TabletNum,DataUsedCapacity,AvailCapacity,TotalCapacity,UsedPct,MaxDiskUsedPct,Tag,ErrMsg,Version,Status
+        String backend_id;
+        def backendId_to_backendIP = [:]
+        def backendId_to_backendHttpPort = [:]
+        getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
+
+        backend_id = backendId_to_backendIP.keySet()[0]
+        StringBuilder showConfigCommand = new StringBuilder();
+        showConfigCommand.append("curl -X GET http://")
+        showConfigCommand.append(backendId_to_backendIP.get(backend_id))
+        showConfigCommand.append(":")
+        showConfigCommand.append(backendId_to_backendHttpPort.get(backend_id))
+        showConfigCommand.append("/api/show_config")
+        logger.info(showConfigCommand.toString())
+        def process = showConfigCommand.toString().execute()
+        int code = process.waitFor()
+        String err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+        String out = process.getText()
+        logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
+        assertEquals(code, 0)
+        def configList = parseJson(out.trim())
+        assert configList instanceof List
+
+        boolean disableAutoCompaction = true
+        for (Object ele in (List) configList) {
+            assert ele instanceof List<String>
+            if (((List<String>) ele)[0] == "disable_auto_compaction") {
+                disableAutoCompaction = Boolean.parseBoolean(((List<String>) ele)[2])
+            }
+        }
+
+        sql """ DROP TABLE IF EXISTS ${tableName} """
+        sql """
+            CREATE TABLE IF NOT EXISTS ${tableName} (
+                `user_id` LARGEINT NOT NULL COMMENT "user id",
+                `date` DATE NOT NULL COMMENT "data load datetime",
+                `datev2` DATEV2 NOT NULL COMMENT "data load datetime",
+                `datetimev2_1` DATETIMEV2(3) NOT NULL COMMENT "data load datetime",
+                `datetimev2_2` DATETIMEV2(6) NOT NULL COMMENT "data load datetime",
+                `city` VARCHAR(20) COMMENT "user city",
+                `age` SMALLINT COMMENT "user age",
+                `sex` TINYINT COMMENT "user gender",
+                `last_visit_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `last_update_date` DATETIME DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `datetime_val1` DATETIMEV2(3) DEFAULT "1970-01-01 00:00:00.111" COMMENT "user last visit time",
+                `datetime_val2` DATETIME(6) DEFAULT "1970-01-01 00:00:00" COMMENT "user last update time",
+                `last_visit_date_not_null` DATETIME NOT NULL DEFAULT "1970-01-01 00:00:00" COMMENT "user last visit time",
+                `cost` BIGINT DEFAULT "0" COMMENT "user total cost",
+                `max_dwell_time` INT DEFAULT "0" COMMENT "user max dwell time",
+                `min_dwell_time` INT DEFAULT "99999" COMMENT "user min dwell time",
+                INDEX idx_user_id (`user_id`) USING INVERTED,
+                INDEX idx_date (`date`) USING INVERTED,
+                INDEX idx_city (`city`) USING INVERTED)
+            DUPLICATE KEY(`user_id`, `date`, `datev2`, `datetimev2_1`, `datetimev2_2`, `city`, `age`, `sex`) DISTRIBUTED BY HASH(`user_id`)
+            PROPERTIES ( "replication_num" = "2", "disable_auto_compaction" = "true", "enable_single_replica_compaction" = "true" );
+        """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-01', '2020-01-01', '2017-10-01 11:11:11.170000', '2017-10-01 11:11:11.110111', '2020-01-01', 1, 30, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (1, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Beijing', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.160000', '2017-10-01 11:11:11.100111', '2020-01-02', 1, 31, 19)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-02', '2020-01-02', '2017-10-01 11:11:11.150000', '2017-10-01 11:11:11.130111', '2020-01-02', 1, 31, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (2, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shanghai', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.140000', '2017-10-01 11:11:11.120111', '2020-01-03', 1, 32, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-03', '2020-01-03', '2017-10-01 11:11:11.100000', '2017-10-01 11:11:11.140111', '2020-01-03', 1, 32, 22)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Guangzhou', 10, 1, '2020-01-04', '2020-01-04', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.150111', '2020-01-04', 1, 33, 21)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (3, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ INSERT INTO ${tableName} VALUES
+             (4, '2017-10-01', '2017-10-01', '2017-10-01 11:11:11.110000', '2017-10-01 11:11:11.110111', 'Shenzhen', 10, 1, NULL, NULL, NULL, NULL, '2020-01-05', 1, 34, 20)
+            """
+
+        sql """ sync """
+
+        qt_select_default """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+
+        //TabletId,ReplicaId,BackendId,SchemaHash,Version,LstSuccessVersion,LstFailedVersion,LstFailedTime,LocalDataSize,RemoteDataSize,RowCount,State,LstConsistencyCheckTime,CheckVersion,VersionCount,QueryHits,PathHash,MetaUrl,CompactionStatus
+        def tablets = sql_return_maparray """ show tablets from ${tableName}; """
+
+        // trigger compactions for all tablets in ${tableName}
+        for (def tablet in tablets) {
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            assertEquals("9", parseJson(o.trim()).rowset_count)
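+            // same format v2 expectation as the other suites: two files (data + merged index) per segment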
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+
+            StringBuilder sb = new StringBuilder();
+            sb.append("curl -X POST http://")
+            sb.append(backendId_to_backendIP.get(backend_id))
+            sb.append(":")
+            sb.append(backendId_to_backendHttpPort.get(backend_id))
+            sb.append("/api/compaction/run?tablet_id=")
+            sb.append(tablet_id)
+            sb.append("&compact_type=cumulative")
+
+            String command = sb.toString()
+            process = command.execute()
+            code = process.waitFor()
+            err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+            out = process.getText()
+            logger.info("Run compaction: code=" + code + ", out=" + out + ", err=" + err)
+            assertEquals(code, 0)
+            def compactJson = parseJson(out.trim())
+            if (compactJson.status.toLowerCase() == "fail") {
+                assertEquals(disableAutoCompaction, false)
+                logger.info("Compaction was done automatically!")
+            }
+            if (disableAutoCompaction) {
+                assertEquals("success", compactJson.status.toLowerCase())
+            }
+        }
+
+        // wait for all compactions done
+        for (def tablet in tablets) {
+            boolean running = true
+            String tablet_id = tablet.TabletId
+            backend_id = tablet.BackendId
+            do {
+                Thread.sleep(1000)
+                StringBuilder sb = new StringBuilder();
+                sb.append("curl -X GET http://")
+                sb.append(backendId_to_backendIP.get(backend_id))
+                sb.append(":")
+                sb.append(backendId_to_backendHttpPort.get(backend_id))
+                sb.append("/api/compaction/run_status?tablet_id=")
+                sb.append(tablet_id)
+
+                String command = sb.toString()
+                logger.info(command)
+                process = command.execute()
+                code = process.waitFor()
+                err = IOGroovyMethods.getText(new BufferedReader(new InputStreamReader(process.getErrorStream())));
+                out = process.getText()
+                logger.info("Get compaction status: code=" + code + ", out=" + out + ", err=" + err)
+                assertEquals(code, 0)
+                def compactionStatus = parseJson(out.trim())
+                assertEquals("success", compactionStatus.status.toLowerCase())
+                running = compactionStatus.run_status
+            } while (running)
+
+            String ip = backendId_to_backendIP.get(backend_id)
+            String port = backendId_to_backendHttpPort.get(backend_id)
+            int segment_count = calc_segment_count(tablet)
+            logger.info("TabletId: " + tablet_id + ", segment_count: " + segment_count)
+            def (c, o, e) = calc_file_crc_on_tablet(ip, port, tablet_id)
+            logger.info("Run calc_file_crc_on_tablet: code=" + c + ", out=" + o + ", err=" + e)
+            assertTrue(c == 0)
+            assertTrue(o.contains("crc_value"))
+            assertTrue(o.contains("used_time_ms"))
+            assertEquals("0", parseJson(o.trim()).start_version)
+            assertEquals("9", parseJson(o.trim()).end_version)
+            // after compaction, there are 2 rowsets.
+            assertEquals("2", parseJson(o.trim()).rowset_count)
+            int file_count = segment_count * 2
+            assertEquals(file_count, Integer.parseInt(parseJson(o.trim()).file_count))
+        }
+
+        int segmentsCount = 0
+        for (def tablet in tablets) {
+            segmentsCount += calc_segment_count(tablet)
+        }
+
+        def dedup_tablets = deduplicate_tablets(tablets)
+
+        // In the p0 testing environment, no operations such as scaling down BE (backend) services are expected.
+        // If tablets or dedup_tablets is empty, an exception is thrown and the case fails.
+        int replicaNum = Math.floor(tablets.size() / dedup_tablets.size())
+        if (replicaNum < 1) {
+            assert(false)
+        }
+
+        assert (segmentsCount <= 8*replicaNum)
+        qt_select_default2 """ SELECT * FROM ${tableName} t WHERE city MATCH 'Beijing' ORDER BY user_id,date,city,age,sex,last_visit_date,last_update_date,last_visit_date_not_null,cost,max_dwell_time,min_dwell_time; """
+    } finally {
+    }
+}

