This is an automated email from the ASF dual-hosted git repository.

dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git


The following commit(s) were added to refs/heads/branch-3.0 by this push:
     new 21802294a81 branch-3.0: [Opt](partial update) Add some cases for partial update #47900 (#48129)
21802294a81 is described below

commit 21802294a81985dbb1b33b6951cd1e1bc4906aa0
Author: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Thu Feb 20 15:22:06 2025 +0800

    branch-3.0: [Opt](partial update) Add some cases for partial update #47900 (#48129)
    
    Cherry-picked from #47900
    
    Co-authored-by: bobhan1 <bao...@selectdb.com>
---
 be/src/olap/base_tablet.cpp                        |   2 +
 .../test_partial_update_publish_seq.out            | Bin 0 -> 1714 bytes
 .../test_partial_update_default_value.out          | Bin 259 -> 1003 bytes
 .../test_partial_update_lookup_row_key.out         | Bin 0 -> 779 bytes
 .../test_partial_update_row_store.out              | Bin 0 -> 847 bytes
 .../test_partial_update_publish_seq.groovy         | 180 +++++++++++++++++++++
 .../test_partial_update_default_value.groovy       |  25 ++-
 .../test_partial_update_lookup_row_key.groovy      |  81 ++++++++++
 .../test_partial_update_row_store.groovy           | 103 ++++++++++++
 9 files changed, 390 insertions(+), 1 deletion(-)

diff --git a/be/src/olap/base_tablet.cpp b/be/src/olap/base_tablet.cpp
index 25398e84346..895899b9ca6 100644
--- a/be/src/olap/base_tablet.cpp
+++ b/be/src/olap/base_tablet.cpp
@@ -707,6 +707,8 @@ Status BaseTablet::calc_segment_delete_bitmap(RowsetSharedPtr rowset,
                                    row_id);
                 ++conflict_rows;
                 continue;
+                // NOTE: for a partial update that doesn't specify the sequence column, we can't use the sequence column value filled in the flush phase
+                // as its final value. Otherwise it may cause inconsistency between replicas.
             }
             if (is_partial_update && rowset_writer != nullptr) {
                 // In publish version, record rows to be deleted for concurrent update
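
The NOTE above concerns the case where a partial-update load omits the sequence column: the value filled in at flush time reflects whichever row version the flush happened to see, and a conflicting load can commit a different sequence value before publish, so replicas could otherwise resolve the conflict differently. The following is only an illustrative sketch of that reasoning, with made-up names (PendingRow, current_seq_at_publish), not the actual calc_segment_delete_bitmap implementation:

    // Illustrative sketch only (assumed names, not the Doris BE implementation).
    #include <cstdint>
    #include <iostream>

    struct PendingRow {
        bool has_explicit_seq;        // the load specified the sequence column itself
        int64_t seq_filled_at_flush;  // value copied from the row visible at flush time
    };

    // Hypothetical stand-in for "look up the key in the latest published data
    // and read its sequence column value".
    int64_t current_seq_at_publish(int64_t latest_committed_seq) {
        return latest_committed_seq;
    }

    int64_t effective_seq(const PendingRow& row, int64_t latest_committed_seq) {
        if (row.has_explicit_seq) {
            return row.seq_filled_at_flush;  // user-supplied, identical on every replica
        }
        // re-derive at publish time so all replicas resolve the conflict the same way
        return current_seq_at_publish(latest_committed_seq);
    }

    int main() {
        // Flush saw seq = 10, but a conflicting load committed seq = 20 before publish.
        PendingRow partial_update_without_seq{false, 10};
        std::cout << effective_seq(partial_update_without_seq, 20) << std::endl;  // prints 20
    }
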
diff --git a/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out
new file mode 100644
index 00000000000..a7fa43e0ad9
Binary files /dev/null and b/regression-test/data/fault_injection_p0/partial_update/test_partial_update_publish_seq.out differ
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out
index edd3326a752..231696c1d6e 100644
Binary files a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out and b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_default_value.out differ
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out
new file mode 100644
index 00000000000..2250bd3535a
Binary files /dev/null and b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.out differ
diff --git a/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out
new file mode 100644
index 00000000000..59b96bcbd7c
Binary files /dev/null and b/regression-test/data/unique_with_mow_p0/partial_update/test_partial_update_row_store.out differ
diff --git a/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy
new file mode 100644
index 00000000000..19639998da4
--- /dev/null
+++ b/regression-test/suites/fault_injection_p0/partial_update/test_partial_update_publish_seq.groovy
@@ -0,0 +1,180 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.Assert
+import java.util.concurrent.TimeUnit
+import org.awaitility.Awaitility
+
+suite("test_partial_update_publish_seq", "nonConcurrent") {
+
+    def enable_block_in_publish = {
+        if (isCloudMode()) {
+            GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.enable_spin_wait")
+            GetDebugPoint().enableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block")
+        } else {
+            GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait")
+            GetDebugPoint().enableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+        }
+    }
+
+    def disable_block_in_publish = {
+        if (isCloudMode()) {
+            GetDebugPoint().disableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.enable_spin_wait")
+            GetDebugPoint().disableDebugPointForAllFEs("CloudGlobalTransactionMgr.getDeleteBitmapUpdateLock.block")
+        } else {
+            GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.enable_spin_wait")
+            GetDebugPoint().disableDebugPointForAllBEs("EnginePublishVersionTask::execute.block")
+        }
+    }
+
+    def inspect_rows = { sqlStr ->
+        sql "set skip_delete_sign=true;"
+        sql "set skip_delete_bitmap=true;"
+        sql "sync"
+        qt_inspect sqlStr
+        sql "set skip_delete_sign=false;"
+        sql "set skip_delete_bitmap=false;"
+        sql "sync"
+    }
+
+
+    try {
+        GetDebugPoint().clearDebugPointsForAllFEs()
+        GetDebugPoint().clearDebugPointsForAllBEs()
+
+        def table1 = "test_partial_update_publish_seq_map"
+        sql "DROP TABLE IF EXISTS ${table1} FORCE;"
+        sql """ CREATE TABLE IF NOT EXISTS ${table1} (
+                `k1` int NOT NULL,
+                `c1` int,
+                `c2` int,
+                `c3` int,
+                `c4` int
+                )UNIQUE KEY(k1)
+            DISTRIBUTED BY HASH(k1) BUCKETS 1
+            PROPERTIES (
+                "enable_mow_light_delete" = "false",
+                "disable_auto_compaction" = "true",
+                "function_column.sequence_col" = "c1",
+                "replication_num" = "1"); """
+
+        sql "insert into ${table1} values(1,1,1,1,1),(2,2,2,2,2),(3,3,3,3,3);"
+        sql "sync;"
+        qt_seq_map_0 "select * from ${table1} order by k1;"
+
+
+        // with seq map val, >/=/< conflicting seq val
+        enable_block_in_publish()
+        def t1 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c2) values(1,10,99),(2,10,99),(3,10,99);"
+        }
+        Thread.sleep(500)
+        def t2 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c3) values(1,20,88),(2,10,88),(3,5,88);"
+        }
+        Thread.sleep(1000)
+        disable_block_in_publish()
+        t1.join()
+        t2.join()
+        qt_seq_map_1 "select * from ${table1} order by k1;"
+        inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;"
+
+        // without seq map val, the filled seq val >/=/< conflicting seq val
+        enable_block_in_publish()
+        t1 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c2) values(1,9,77),(2,10,77),(3,50,77);"
+        }
+        Thread.sleep(500)
+        t2 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c4) values(1,33),(2,33),(3,33);"
+        }
+        Thread.sleep(1000)
+        disable_block_in_publish()
+        t1.join()
+        t2.join()
+        qt_seq_map_2 "select * from ${table1} order by k1;"
+        inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;"
+
+        // with delete sign and seq col val, >/=/< conflicting seq val
+        enable_block_in_publish()
+        t1 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c2) values(1,80,66),(2,100,66),(3,120,66);"
+        }
+        Thread.sleep(500)
+        t2 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,__DORIS_DELETE_SIGN__) values(1,100,1),(2,100,1),(3,100,1);"
+        }
+        Thread.sleep(1000)
+        disable_block_in_publish()
+        t1.join()
+        t2.join()
+        qt_seq_map_3 "select * from ${table1} order by k1;"
+        inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;"
+
+
+        sql "truncate table ${table1};"
+        sql "insert into ${table1} values(1,10,1,1,1),(2,10,2,2,2),(3,10,3,3,3);"
+        sql "sync;"
+        // with delete sign and without seq col val, >/=/< conflicting seq val
+        enable_block_in_publish()
+        t1 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c1,c2) values(1,20,55),(2,100,55),(3,120,55);"
+        }
+        Thread.sleep(500)
+        t2 = Thread.start {
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${table1}(k1,c4,__DORIS_DELETE_SIGN__) values(1,100,1),(2,100,1),(3,100,1);"
+        }
+        Thread.sleep(1000)
+        disable_block_in_publish()
+        t1.join()
+        t2.join()
+        qt_seq_map_4 "select * from ${table1} order by k1;"
+        inspect_rows "select *,__DORIS_DELETE_SIGN__,__DORIS_SEQUENCE_COL__,__DORIS_VERSION_COL__ from ${table1} order by k1,__DORIS_VERSION_COL__;"
+
+
+    } catch(Exception e) {
+        logger.info(e.getMessage())
+        throw e
+    } finally {
+        GetDebugPoint().clearDebugPointsForAllFEs()
+        GetDebugPoint().clearDebugPointsForAllBEs()
+    }
+}
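
The suite above constructs its races by blocking publish with debug points, starting two conflicting partial-update loads, and then releasing the block so their publishes resolve against each other. A minimal, self-contained sketch of that orchestration pattern, using plain C++ threads and a gate in place of the Doris debug points (all names here are made up):

    #include <chrono>
    #include <future>
    #include <iostream>
    #include <thread>

    int main() {
        // plays the role of enable_block_in_publish(): publish waits on this gate
        std::promise<void> publish_gate;
        std::shared_future<void> gate = publish_gate.get_future().share();

        auto load = [gate](const char* name) {
            std::cout << name << " flushed\n";   // flush finishes while publish is blocked
            gate.wait();                         // blocked "in publish"
            std::cout << name << " published\n"; // conflict resolution happens here
        };

        std::thread t1(load, "load-1");
        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        std::thread t2(load, "load-2");          // second, conflicting load

        std::this_thread::sleep_for(std::chrono::milliseconds(100));
        publish_gate.set_value();                // disable_block_in_publish(): let both publish
        t1.join();
        t2.join();
    }
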
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy
index 28d1d0ed42d..fb12c840280 100644
--- a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_default_value.groovy
@@ -68,8 +68,31 @@ suite("test_primary_key_partial_update_default_value", "p0") {
                 select * from ${tableName} order by id;
             """
 
-            // drop drop
+            // test special default values
+            tableName = "test_primary_key_partial_update_default_value2"
+            // create table
             sql """ DROP TABLE IF EXISTS ${tableName} """
+            sql """ CREATE TABLE ${tableName} (
+                    k int,
+                    c1 int,
+                    c2 bitmap NOT NULL DEFAULT bitmap_empty,
+                    c3 double DEFAULT PI,
+                    c4 double DEFAULT E,
+                    c5 array<int> NOT NULL DEFAULT "[]"
+                    ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                    PROPERTIES("replication_num" = "1", "enable_unique_key_merge_on_write" = "true",
+                    "store_row_column" = "${use_row_store}"); """
+
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${tableName}(k,c1) values(1,1),(2,2),(3,3);"
+            sql "sync;"
+            qt_sql "select k,c1,bitmap_to_string(c2),c3,c4,ARRAY_SIZE(c5) from ${tableName} order by k;"
+
+            sql "insert into ${tableName}(k,c1) values(1,10),(2,20),(4,40),(5,50);"
+            sql "sync;"
+            qt_sql "select k,c1,bitmap_to_string(c2),c3,c4,ARRAY_SIZE(c5) from ${tableName} order by k;"
         }
     }
 }
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy
new file mode 100644
index 00000000000..dfd8f43a6b3
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_lookup_row_key.groovy
@@ -0,0 +1,81 @@
+
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+suite("test_partial_update_lookup_row_key", "p0") {
+
+    String db = context.config.getDbNameByFile(context.file)
+    sql "select 1;" // to create database
+
+    for (def use_row_store : [false, true]) {
+        logger.info("current params: use_row_store: ${use_row_store}")
+
+        connect( context.config.jdbcUser, context.config.jdbcPassword, context.config.jdbcUrl) {
+            sql "use ${db};"
+            sql "sync;"
+
+            def tableName = "test_partial_update_publish_conflict_seq"
+            sql """ DROP TABLE IF EXISTS ${tableName} force;"""
+            sql """ CREATE TABLE ${tableName} (
+                `k` int(11) NULL, 
+                `v1` BIGINT NULL,
+                `v2` BIGINT NULL,
+                `v3` BIGINT NULL,
+                `v4` BIGINT NULL,
+                ) UNIQUE KEY(`k`) DISTRIBUTED BY HASH(`k`) BUCKETS 1
+                PROPERTIES(
+                "replication_num" = "1",
+                "enable_unique_key_merge_on_write" = "true",
+                "disable_auto_compaction" = "true",
+                "function_column.sequence_col" = "v1",
+                "store_row_column" = "${use_row_store}"); """
+
+            sql """ insert into ${tableName} values
+                    (1,400,1,1,1),(2,100,2,2,2),(3,30,3,3,3),(4,300,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,100,1,1,1),(2,400,2,2,2),(3,100,3,3,3),(4,200,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,200,1,1,1),(2,200,2,2,2),(3,300,3,3,3),(4,400,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,300,1,1,1),(2,300,2,2,2),(3,400,3,3,3),(4,100,4,4,4);"""
+            qt_1 "select * from ${tableName} order by k;"
+            // lookup_row_key searches rowsets for the key from the highest version down to the lowest version;
+            // the index of the valid segment for each key follows that search sequence
+
+            sql "set enable_unique_key_partial_update=true;"
+            sql "set enable_insert_strict=false;"
+            sql "sync;"
+            sql "insert into ${tableName}(k,v2) values(1,99),(2,99),(3,99),(4,99),(5,99),(6,99);"
+            qt_1 "select *,__DORIS_SEQUENCE_COL__ from ${tableName} order by k;"
+
+
+            sql "truncate table ${tableName};"
+            sql """ insert into ${tableName} values
+                    (1,400,1,1,1),(2,100,2,2,2),(3,30,3,3,3),(4,300,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,100,1,1,1),(2,400,2,2,2),(3,100,3,3,3),(4,200,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,200,1,1,1),(2,200,2,2,2),(3,300,3,3,3),(4,400,4,4,4);"""
+            sql """ insert into ${tableName} values
+                    (1,300,1,1,1),(2,300,2,2,2),(3,400,3,3,3),(4,100,4,4,4);"""
+            qt_2 "select * from ${tableName} order by k;"
+
+            sql "insert into ${tableName}(k,v1,v3) values(1,500,88),(2,500,88),(3,300,88),(4,200,88),(5,200,88),(6,200,88);"
+            qt_2 "select *,__DORIS_SEQUENCE_COL__ from ${tableName} order by k;"
+        }
+    }
+}
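
The comment in this suite relies on lookup_row_key scanning rowsets from the highest version down to the lowest and taking the first segment that still contains the key. A rough sketch of that search order over a toy data model (assumed names, not the real BaseTablet API):

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>
    #include <utility>
    #include <vector>

    struct Rowset {
        int64_t version;
        std::map<std::string, int64_t> keys;  // key -> row id (toy model)
    };

    // Hypothetical lookup: newest rowset first, first hit wins.
    std::optional<std::pair<int64_t, int64_t>>  // {version, row_id}
    lookup_row_key(std::vector<Rowset> rowsets, const std::string& key) {
        std::sort(rowsets.begin(), rowsets.end(),
                  [](const Rowset& a, const Rowset& b) { return a.version > b.version; });
        for (const auto& rs : rowsets) {
            if (auto it = rs.keys.find(key); it != rs.keys.end()) {
                return std::make_pair(rs.version, it->second);
            }
        }
        return std::nullopt;  // key not found in any published rowset
    }

    int main() {
        std::vector<Rowset> rowsets = {
            {1, {{"k1", 0}}},             // oldest
            {2, {{"k1", 7}, {"k2", 3}}},  // newest version containing k1 -> wins
        };
        if (auto hit = lookup_row_key(rowsets, "k1")) {
            std::cout << "found in version " << hit->first << ", row " << hit->second << "\n";
        }
    }
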
diff --git a/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy
new file mode 100644
index 00000000000..30102f9064d
--- /dev/null
+++ b/regression-test/suites/unique_with_mow_p0/partial_update/test_partial_update_row_store.groovy
@@ -0,0 +1,103 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.junit.Assert
+import java.util.concurrent.TimeUnit
+import org.awaitility.Awaitility
+
+suite("test_partial_update_row_store", "nonConcurrent") {
+
+    def table1 = "test_partial_update_row_store"
+    sql "DROP TABLE IF EXISTS ${table1} FORCE;"
+    sql """ CREATE TABLE IF NOT EXISTS ${table1} (
+            `k1` int NOT NULL,
+            `c1` int,
+            `c2` int,
+            c3 int
+        )UNIQUE KEY(k1)
+        DISTRIBUTED BY HASH(k1) BUCKETS 1
+        PROPERTIES (
+            "enable_mow_light_delete" = "false",
+            "disable_auto_compaction" = "true",
+            "replication_num" = "1",
+            "store_row_column" = "false"); """
+
+    sql "insert into ${table1} values(1,1,1,1),(2,2,2,2),(3,3,3,3);"
+    sql "insert into ${table1} values(4,4,4,4),(5,5,5,5),(6,6,6,6);"
+    sql "sync;"
+    qt_1 "select * from ${table1} order by k1;"
+
+    def doSchemaChange = { cmd ->
+        sql cmd
+        waitForSchemaChangeDone {
+            sql """SHOW ALTER TABLE COLUMN WHERE IndexName='${table1}' ORDER BY createtime DESC LIMIT 1"""
+            time 2000
+        }
+    }
+
+    // turn on row_store_column, but only store part of columns
+    doSchemaChange """alter table ${table1} set ("store_row_column" = "true")"""
+    doSchemaChange """alter table ${table1} set ("row_store_columns" = "k1,c2")"""
+    sql "insert into ${table1} values(7,7,7,7),(8,8,8,8),(9,9,9,9);"
+
+    sql "set enable_unique_key_partial_update=true;"
+    sql "set enable_insert_strict=false;"
+    sql "sync;"
+    sql "insert into ${table1}(k1,c1,c2) values(1,10,10),(2,20,20),(5,50,50),(7,70,70),(100,100,100);"
+    qt_2 "select *, LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;"
+    sql "insert into ${table1}(k1,c3) values(1,99),(3,99),(6,99),(8,99),(200,200);"
+    qt_2 "select *, LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;"
+    sql "set enable_unique_key_partial_update=false;"
+    sql "set enable_insert_strict=true;"
+    sql "sync;"
+
+
+    sql "truncate table ${table1};"
+    sql "insert into ${table1} values(1,1,1,1),(2,2,2,2),(3,3,3,3);"
+    sql "insert into ${table1} values(4,4,4,4),(5,5,5,5),(6,6,6,6);"
+    sql "sync;"
+    qt_3 "select * from ${table1} order by k1;"
+
+
+    // turn on full row store column
+    doSchemaChange """alter table ${table1} set ("store_row_column" = "true")"""
+    sql "insert into ${table1} values(7,7,7,7),(8,8,8,8),(9,9,9,9);"
+
+    sql "set enable_unique_key_partial_update=true;"
+    sql "set enable_insert_strict=false;"
+    sql "sync;"
+    sql "insert into ${table1}(k1,c2) values(2,777),(3,777),(10,777),(21,777),(8,777);"
+    qt_4 "select *,LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;"
+    sql "set enable_unique_key_partial_update=false;"
+    sql "set enable_insert_strict=true;"
+    sql "sync;"
+
+    // from row store to part columns row store
+    doSchemaChange """alter table ${table1} set ("row_store_columns" = "k1,c2")"""
+    sql "insert into ${table1} values(11,11,11,11),(20,20,20,20);"
+
+    sql "set enable_unique_key_partial_update=true;"
+    sql "set enable_insert_strict=false;"
+    sql "sync;"
+    sql "insert into ${table1}(k1,c3) values(1,987),(2,987),(11,987),(22,987);"
+    qt_4 "select *,LENGTH(__DORIS_ROW_STORE_COL__) from ${table1} order by k1;"
+    sql "set enable_unique_key_partial_update=false;"
+    sql "set enable_insert_strict=true;"
+    sql "sync;"
+
+    // Cannot alter store_row_column from true to false currently; should add a related case if that becomes supported
+}
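
The LENGTH(__DORIS_ROW_STORE_COL__) checks above differ because with "row_store_columns" = "k1,c2" only that subset of columns is serialized into the per-row blob, while full row store serializes them all. A toy sketch of that idea, under an assumed encoding that is not the real __DORIS_ROW_STORE_COL__ format:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>
    #include <vector>

    // Serialize only the configured subset of columns into the row-store blob.
    std::string encode_row_store(const std::map<std::string, int32_t>& row,
                                 const std::vector<std::string>& stored_columns) {
        std::string blob;
        for (const auto& col : stored_columns) {
            auto it = row.find(col);
            if (it == row.end()) continue;
            blob += col + "=" + std::to_string(it->second) + ";";
        }
        return blob;
    }

    int main() {
        std::map<std::string, int32_t> row{{"k1", 1}, {"c1", 10}, {"c2", 10}, {"c3", 99}};
        // full row store vs. partial row store: the blob lengths differ
        std::cout << encode_row_store(row, {"k1", "c1", "c2", "c3"}).size() << "\n";
        std::cout << encode_row_store(row, {"k1", "c2"}).size() << "\n";
    }
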

