This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.0 by this push:
new 4b8554535da [branch-2.0] Picks "[Fix](schema change) Fix can't do
reorder column schema change for MOW table and duplicate key table #37067"
(#37515)
4b8554535da is described below
commit 4b8554535da7d2b41be91e2b9d8ccf20301d4e6c
Author: bobhan1 <[email protected]>
AuthorDate: Sat Jul 13 22:07:46 2024 +0800
[branch-2.0] Picks "[Fix](schema change) Fix can't do reorder column schema
change for MOW table and duplicate key table #37067" (#37515)
## Proposed changes
picks https://github.com/apache/doris/pull/37067
---
.../apache/doris/alter/SchemaChangeHandler.java | 4 +-
.../org/apache/doris/analysis/CreateTableStmt.java | 3 +-
.../test_schema_change_datev2_with_delete.out | 81 ----------
.../datev2/test_dup_keys_schema_change_datev2.out | 70 ---------
.../test_modify_reorder_column.out | 22 +++
.../test_schema_change_datev2_with_delete.groovy | 120 ---------------
.../test_dup_keys_schema_change_datev2.groovy | 167 ---------------------
.../test_modify_reorder_column.groovy | 131 ++++++++++++++++
8 files changed, 157 insertions(+), 441 deletions(-)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
index dae99f40a36..b526b217f34 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/alter/SchemaChangeHandler.java
@@ -541,7 +541,7 @@ public class SchemaChangeHandler extends AlterHandler {
}
if (!modColumn.isKey()) {
if (olapTable.getEnableUniqueKeyMergeOnWrite()) {
- modColumn.setAggregationType(AggregateType.NONE, false);
+ modColumn.setAggregationType(AggregateType.NONE, true);
} else {
modColumn.setAggregationType(AggregateType.REPLACE, true);
}
@@ -928,7 +928,7 @@ public class SchemaChangeHandler extends AlterHandler {
}
if (!newColumn.isKey()) {
if (olapTable.getEnableUniqueKeyMergeOnWrite()) {
- newColumn.setAggregationType(AggregateType.NONE, false);
+ newColumn.setAggregationType(AggregateType.NONE, true);
} else {
newColumn.setAggregationType(AggregateType.REPLACE, true);
}
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
index 22f55598856..b2120a3133c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/analysis/CreateTableStmt.java
@@ -574,7 +574,8 @@ public class CreateTableStmt extends DdlStmt {
for (ColumnDef columnDef : columnDefs) {
Column col = columnDef.toColumn();
- if (keysDesc != null && keysDesc.getKeysType() ==
KeysType.UNIQUE_KEYS) {
+ if (keysDesc != null && (keysDesc.getKeysType() ==
KeysType.UNIQUE_KEYS
+ || keysDesc.getKeysType() == KeysType.DUP_KEYS)) {
if (!col.isKey()) {
col.setAggregationTypeImplicit(true);
}
diff --git
a/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out
b/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out
deleted file mode 100644
index 2ef72df7c71..00000000000
---
a/regression-test/data/schema_change/test_schema_change_datev2_with_delete.out
+++ /dev/null
@@ -1,81 +0,0 @@
--- This file is automatically generated. You should know what you did if you
want to edit this
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
diff --git
a/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out
b/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out
deleted file mode 100644
index de1946b723f..00000000000
---
a/regression-test/data/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.out
+++ /dev/null
@@ -1,70 +0,0 @@
--- This file is automatically generated. You should know what you did if you
want to edit this
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-2022-01-01 2022-01-06T11:11:11 2022-01-06 2022-01-06T11:11:11
-2022-01-02 2022-01-02T11:11:11 2022-01-02 2022-01-02T11:11:11
-2022-01-03 2022-01-03T11:11:11 2022-01-03 2022-01-01T11:11:11
-2022-01-04 2022-01-04T11:11:11 2022-01-01 2022-01-04T11:11:11
-2022-01-05 2022-01-01T11:11:11 2022-01-05 2022-01-05T11:11:11
-
--- !sql --
-
diff --git
a/regression-test/data/schema_change_p0/test_modify_reorder_column.out
b/regression-test/data/schema_change_p0/test_modify_reorder_column.out
new file mode 100644
index 00000000000..3f951b0640d
--- /dev/null
+++ b/regression-test/data/schema_change_p0/test_modify_reorder_column.out
@@ -0,0 +1,22 @@
+-- This file is automatically generated. You should know what you did if you
want to edit this
+-- !dup --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D",
"f3": 20, "f4": 8.343} 10
+
+-- !dup --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} 10 {"f1": "C",
"f2": "D", "f3": 20, "f4": 8.343}
+2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} 20 \N
+
+-- !mor --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D",
"f3": 20, "f4": 8.343} 10
+
+-- !mor --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} 10 {"f1": "C",
"f2": "D", "f3": 20, "f4": 8.343}
+2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} 20 \N
+
+-- !mow --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} {"f1": "C", "f2": "D",
"f3": 20, "f4": 8.343} 10
+
+-- !mow --
+1 {"f1": "A", "f2": "B", "f3": 10, "f4": 3.14} 10 {"f1": "C",
"f2": "D", "f3": 20, "f4": 8.343}
+2 {"f1": "E", "f2": "F", "f3": 30, "f4": 484.3234} 20 \N
+
diff --git
a/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy
b/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy
deleted file mode 100644
index ca2cb666dbb..00000000000
---
a/regression-test/suites/schema_change/test_schema_change_datev2_with_delete.groovy
+++ /dev/null
@@ -1,120 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-suite("test_schema_change_datev2_with_delete") {
- def tbName = "test_schema_change_datev2_with_delete"
- def getJobState = { tableName ->
- def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE
IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
- return jobStateResult[0][9]
- }
-
- sql """ DROP TABLE IF EXISTS ${tbName} FORCE"""
- // Create table and disable light weight schema change
- sql """
- CREATE TABLE IF NOT EXISTS ${tbName}
- (
- `datek1` date DEFAULT '2022-01-01',
- `datek2` datetime DEFAULT '2022-01-01 11:11:11',
- `datev1` date DEFAULT '2022-01-01',
- `datev2` datetime DEFAULT '2022-01-01 11:11:11'
- )
- DUPLICATE KEY(`datek1`,`datek2`)
- DISTRIBUTED BY HASH(`datek1`) BUCKETS 1
- PROPERTIES("replication_num" = "1", "light_schema_change" =
"false");
- """
- sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11',
'2022-01-02', '2022-01-02 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`)
values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`)
values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`)
values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');"""
- sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`)
values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ alter table ${tbName} modify column `datev1` datev2 DEFAULT
'2022-01-01' """
- int max_try_time = 1000
- while(max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ alter table ${tbName} modify column `datev2` datetimev2 DEFAULT
'2022-01-01 11:11:11' """
- max_try_time = 1000
- while(max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ alter table ${tbName} modify column `datev2` datetimev2(3)
DEFAULT '2022-01-01 11:11:11' """
- max_try_time = 1000
- while(max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ delete from ${tbName} where `datev1`='2022-01-02';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ delete from ${tbName} where `datev2`='2022-01-04 11:11:11.111';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ delete from ${tbName} where `datev2`='2022-01-04 11:11:11';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11',
'2022-01-02', '2022-01-02 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`)
values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`)
values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`)
values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');"""
- sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`)
values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');"""
-
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """ delete from ${tbName} where `datev1`='2022-01-01';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ delete from ${tbName} where `datev2`='2022-01-01 11:11:11.111';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ delete from ${tbName} where `datev2`='2022-01-01 11:11:11';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ DROP TABLE ${tbName} force"""
-}
diff --git
a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy
b/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy
deleted file mode 100644
index cb50fc85b09..00000000000
---
a/regression-test/suites/schema_change_p0/datev2/test_dup_keys_schema_change_datev2.groovy
+++ /dev/null
@@ -1,167 +0,0 @@
-// Licensed to the Apache Software Foundation (ASF) under one
-// or more contributor license agreements. See the NOTICE file
-// distributed with this work for additional information
-// regarding copyright ownership. The ASF licenses this file
-// to you under the Apache License, Version 2.0 (the
-// "License"); you may not use this file except in compliance
-// with the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing,
-// software distributed under the License is distributed on an
-// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-// KIND, either express or implied. See the License for the
-// specific language governing permissions and limitations
-// under the License.
-
-import org.codehaus.groovy.runtime.IOGroovyMethods
-
-suite("test_dup_keys_schema_change_datev2") {
- def tbName = "test_dup_keys_schema_change_datev2"
- def getJobState = { tableName ->
- def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE
IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
- return jobStateResult[0][9]
- }
-
- String backend_id;
- def backendId_to_backendIP = [:]
- def backendId_to_backendHttpPort = [:]
- getBackendIpHttpPort(backendId_to_backendIP, backendId_to_backendHttpPort);
-
- backend_id = backendId_to_backendIP.keySet()[0]
- def (code, out, err) =
show_be_config(backendId_to_backendIP.get(backend_id),
backendId_to_backendHttpPort.get(backend_id))
-
- logger.info("Show config: code=" + code + ", out=" + out + ", err=" + err)
- assertEquals(code, 0)
- def configList = parseJson(out.trim())
- assert configList instanceof List
-
- def do_compact = { tableName ->
- String[][] tablets = sql """ show tablets from ${tableName}; """
- for (String[] tablet in tablets) {
- String tablet_id = tablet[0]
- backend_id = tablet[2]
- logger.info("run compaction:" + tablet_id)
- (code, out, err) =
be_run_cumulative_compaction(backendId_to_backendIP.get(backend_id),
backendId_to_backendHttpPort.get(backend_id), tablet_id)
- logger.info("Run compaction: code=" + code + ", out=" + out + ",
err=" + err)
- }
-
- // wait for all compactions done
- for (String[] tablet in tablets) {
- boolean running = true
- do {
- Thread.sleep(100)
- String tablet_id = tablet[0]
- backend_id = tablet[2]
- (code, out, err) =
be_get_compaction_status(backendId_to_backendIP.get(backend_id),
backendId_to_backendHttpPort.get(backend_id), tablet_id)
- logger.info("Get compaction status: code=" + code + ", out=" +
out + ", err=" + err)
- assertEquals(code, 0)
- def compactionStatus = parseJson(out.trim())
- assertEquals("success", compactionStatus.status.toLowerCase())
- running = compactionStatus.run_status
- } while (running)
- }
- }
-
- sql """ DROP TABLE IF EXISTS ${tbName} FORCE"""
- // Create table and disable light weight schema change
- sql """
- CREATE TABLE IF NOT EXISTS ${tbName}
- (
- `datek1` date DEFAULT '2022-01-01',
- `datek2` datetime DEFAULT '2022-01-01 11:11:11',
- `datev1` date DEFAULT '2022-01-01',
- `datev2` datetime DEFAULT '2022-01-01 11:11:11'
- )
- DUPLICATE KEY(`datek1`,`datek2`)
- DISTRIBUTED BY HASH(`datek1`) BUCKETS 1
- PROPERTIES("replication_num" = "1", "light_schema_change" =
"false");
- """
- // datev2
- sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11',
'2022-01-02', '2022-01-02 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`)
values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`)
values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`)
values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');"""
- sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`)
values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');"""
-
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ alter table ${tbName} modify column `datev1` datev2 DEFAULT
'2022-01-01' """
- int max_try_time = 1000
- while (max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- do_compact(tbName)
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """delete from ${tbName} where `datev1` <= '2022-01-06';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- // datetimev2(0)
- sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11',
'2022-01-02', '2022-01-02 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`)
values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`)
values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`)
values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');"""
- sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`)
values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');"""
-
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """ alter table ${tbName} modify column `datev2` datetimev2 DEFAULT
'2022-01-01 11:11:11' """
- max_try_time = 1000
- while (max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- do_compact(tbName)
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """delete from ${tbName} where `datev2` <= '2022-01-06 11:11:11';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- // datetimev2(3)
- sql """ insert into ${tbName} values('2022-01-02', '2022-01-02 11:11:11',
'2022-01-02', '2022-01-02 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev1`)
values('2022-01-03', '2022-01-03 11:11:11', '2022-01-03');"""
- sql """ insert into ${tbName} (`datek1`, `datek2`, `datev2`)
values('2022-01-04', '2022-01-04 11:11:11', '2022-01-04 11:11:11');"""
- sql """ insert into ${tbName} (`datek1`, `datev1`, `datev2`)
values('2022-01-05', '2022-01-05', '2022-01-05 11:11:11');"""
- sql """ insert into ${tbName} (`datek2`, `datev1`, `datev2`)
values('2022-01-06 11:11:11', '2022-01-06', '2022-01-06 11:11:11');"""
-
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """ alter table ${tbName} modify column `datev2` datetimev2(3)
DEFAULT '2022-01-01 11:11:11' """
- max_try_time = 1000
- while (max_try_time--){
- String result = getJobState(tbName)
- if (result == "FINISHED") {
- sleep(3000)
- break
- } else {
- sleep(100)
- if (max_try_time < 1){
- assertEquals(1,2)
- }
- }
- }
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- do_compact(tbName)
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
- sql """delete from ${tbName} where `datev2` < '2022-01-06 11:11:11.111';"""
- qt_sql """select * from ${tbName} ORDER BY `datek1`;"""
-
- sql """ DROP TABLE ${tbName} force"""
-}
diff --git
a/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy
b/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy
new file mode 100644
index 00000000000..8a9d5d1779e
--- /dev/null
+++ b/regression-test/suites/schema_change_p0/test_modify_reorder_column.groovy
@@ -0,0 +1,131 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+import org.codehaus.groovy.runtime.IOGroovyMethods
+
+suite ("test_modify_reorder_column") {
+
+ def getJobState = { tableName ->
+ def jobStateResult = sql """ SHOW ALTER TABLE COLUMN WHERE
IndexName='${tableName}' ORDER BY createtime DESC LIMIT 1 """
+ return jobStateResult[0][9]
+ }
+
+ // test reorder column on duplicate table
+ def tbl1 = "test_modify_reorder_column_dup"
+ sql """ DROP TABLE IF EXISTS ${tbl1}; """
+ sql """ create table ${tbl1} (
+ k1 TINYINT,
+ colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NOT NULL,
+ colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NULL,
+ v int
+ ) duplicate key(k1) distributed by hash(k1) buckets 1
+ properties( "replication_num" = "1" ); """
+
+ sql """insert into ${tbl1} values
+ (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, 10) """
+ qt_dup """ select * from ${tbl1} order by k1;"""
+
+ sql "ALTER TABLE ${tbl1} MODIFY COLUMN colnull STRUCT<f1: varchar(65533),
f2: char(32), f3: int, f4: double> NULL AFTER v"
+
+
+ int max_try_time = 100
+ while (max_try_time--){
+ String result = getJobState("${tbl1}")
+ if (result == "FINISHED") {
+ sleep(1000)
+ break
+ } else {
+ sleep(1000)
+ assertTrue(max_try_time>1)
+ }
+ }
+
+ sql """insert into ${tbl1} values
+ (2, {"E", "F", 30, 484.3234}, 20, null) """
+ qt_dup """ select * from ${tbl1} order by k1;"""
+ sql """DROP TABLE IF EXISTS ${tbl1} FORCE; """
+
+
+ // test reorder column on MOR table
+ def tbl2 = "test_modify_reorder_column_mor"
+ sql """ DROP TABLE IF EXISTS ${tbl2}; """
+ sql """ create table ${tbl2} (
+ k1 TINYINT,
+ colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NOT NULL,
+ colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NULL,
+ v int
+ ) unique key(k1) distributed by hash(k1) buckets 1
+ properties( "replication_num" = "1",
"enable_unique_key_merge_on_write" = "false" ); """
+
+ sql """insert into ${tbl2} values
+ (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, 10) """
+ qt_mor """ select * from ${tbl2} order by k1;"""
+
+ sql "ALTER TABLE ${tbl2} MODIFY COLUMN colnull STRUCT<f1: varchar(65533),
f2: char(32), f3: int, f4: double> NULL AFTER v"
+
+ max_try_time = 100
+ while (max_try_time--){
+ String result = getJobState("${tbl2}")
+ if (result == "FINISHED") {
+ sleep(1000)
+ break
+ } else {
+ sleep(1000)
+ assertTrue(max_try_time>1)
+ }
+ }
+
+ sql """insert into ${tbl2} values
+ (2, {"E", "F", 30, 484.3234}, 20, null) """
+ qt_mor """ select * from ${tbl2} order by k1;"""
+ sql """DROP TABLE IF EXISTS ${tbl2} FORCE; """
+
+
+ // test reorder column on MOW table
+ def tbl3 = "test_modify_reorder_column_mow"
+ sql """ DROP TABLE IF EXISTS ${tbl3}; """
+ sql """ create table ${tbl3} (
+ k1 TINYINT,
+ colnotnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NOT NULL,
+ colnull STRUCT<f1: varchar(65533), f2: char(32), f3: int, f4:
double> NULL,
+ v int
+ ) unique key(k1) distributed by hash(k1) buckets 1
+        properties( "replication_num" = "1",
 "enable_unique_key_merge_on_write" = "true" ); """
+
+ sql """insert into ${tbl3} values
+ (1, {"A", "B", 10, 3.14}, {"C", "D", 20, 8.343}, 10) """
+ qt_mow """ select * from ${tbl3} order by k1;"""
+
+ sql "ALTER TABLE ${tbl3} MODIFY COLUMN colnull STRUCT<f1: varchar(65533),
f2: char(32), f3: int, f4: double> NULL AFTER v"
+
+ max_try_time = 100
+ while (max_try_time--){
+ String result = getJobState("${tbl3}")
+ if (result == "FINISHED") {
+ sleep(1000)
+ break
+ } else {
+ sleep(1000)
+ assertTrue(max_try_time>1)
+ }
+ }
+
+ sql """insert into ${tbl3} values
+ (2, {"E", "F", 30, 484.3234}, 20, null) """
+ qt_mow """ select * from ${tbl3} order by k1;"""
+ sql """DROP TABLE IF EXISTS ${tbl3} FORCE; """
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]