This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch branch-3.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-3.0 by this push:
new 9498f95ac31 branch-3.0: [fix](regression) fix the failed of
cold_heat_separation_p2 #49625 (#50065)
9498f95ac31 is described below
commit 9498f95ac318ad63945fd10cf674754f5d5b36bf
Author: github-actions[bot]
<41898282+github-actions[bot]@users.noreply.github.com>
AuthorDate: Wed Apr 16 11:24:50 2025 +0800
branch-3.0: [fix](regression) fix the failed of cold_heat_separation_p2
#49625 (#50065)
Cherry-picked from #49625
Co-authored-by: yagagagaga <[email protected]>
---
.../add_drop_partition.groovy | 41 ++++++---------
.../add_drop_partition_by_hdfs.groovy | 60 +++++++++-------------
.../create_table_use_dynamic_partition.groovy | 38 ++++++++------
...eate_table_use_dynamic_partition_by_hdfs.groovy | 55 ++++++++++----------
.../create_table_use_partition_policy.groovy | 17 +++---
...reate_table_use_partition_policy_by_hdfs.groovy | 25 ++++-----
.../create_table_use_policy.groovy | 17 +++---
.../create_table_use_policy_by_hdfs.groovy | 27 +++++-----
.../load_colddata_to_hdfs.groovy | 22 ++++----
.../modify_replica_use_partition.groovy | 48 ++++++++++++-----
.../modify_replica_use_partition_by_hdfs.groovy | 57 +++++++++++++-------
.../table_modify_resouce_and_policy.groovy | 15 +++---
.../table_modify_resouce_and_policy_by_hdfs.groovy | 23 ++++-----
.../test_show_storage_policy_using.groovy | 2 +-
14 files changed, 240 insertions(+), 207 deletions(-)
diff --git
a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
index f53d5c27b36..2250607465e 100644
--- a/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
+++ b/regression-test/suites/cold_heat_separation_p2/add_drop_partition.groovy
@@ -48,7 +48,8 @@ suite("add_drop_partition") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "tbl1"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "tbl1${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def check_storage_policy_exist = { name->
@@ -63,8 +64,8 @@ suite("add_drop_partition") {
return false;
}
- def resource_name = "test_add_drop_partition_resource"
- def policy_name= "test_add_drop_partition_policy"
+ def resource_name = "test_add_drop_partition_resource${suffix}"
+ def policy_name= "test_add_drop_partition_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -159,7 +160,8 @@ suite("add_drop_partition") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -167,6 +169,7 @@ suite("add_drop_partition") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
@@ -193,16 +196,14 @@ suite("add_drop_partition") {
assertTrue(par[12] == "${policy_name}")
}
- try_sql """
- drop storage policy add_policy;
- """
+ def add_resource = "add_resource${suffix}"
try_sql """
- drop resource add_resource;
+ drop resource ${add_resource};
"""
sql """
- CREATE RESOURCE IF NOT EXISTS "add_resource"
+ CREATE RESOURCE IF NOT EXISTS "${add_resource}"
PROPERTIES(
"type"="s3",
"AWS_ENDPOINT" = "${getS3Endpoint()}",
@@ -218,11 +219,6 @@ suite("add_drop_partition") {
);
"""
- try_sql """
- create storage policy tmp_policy
- PROPERTIES( "storage_resource" = "add_resource", "cooldown_ttl" = "300");
- """
-
// can not set to one policy with different resource
try {
sql """alter table ${tableName} set ("storage_policy" =
"add_policy");"""
@@ -230,22 +226,23 @@ suite("add_drop_partition") {
assertTrue(true)
}
+ def add_policy1 = "add_policy1${suffix}"
sql """
- CREATE STORAGE POLICY IF NOT EXISTS add_policy1
+ CREATE STORAGE POLICY IF NOT EXISTS ${add_policy1}
PROPERTIES(
"storage_resource" = "${resource_name}",
"cooldown_ttl" = "60"
)
"""
- sql """alter table ${tableName} set ("storage_policy" = "add_policy1");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${add_policy1}");"""
// wait for report
sleep(300000)
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "add_policy1")
+ assertTrue(par[12] == "${add_policy1}")
}
@@ -260,7 +257,7 @@ suite("add_drop_partition") {
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "add_policy1")
+ assertTrue(par[12] == "${add_policy1}")
}
sql """
@@ -271,16 +268,12 @@ suite("add_drop_partition") {
DROP TABLE ${tableName}
"""
- try_sql """
- drop storage policy add_policy;
- """
-
sql """
- drop storage policy add_policy1;
+ drop storage policy ${add_policy1};
"""
sql """
- drop resource add_resource;
+ drop resource ${add_resource};
"""
diff --git
a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
index f42ddc22503..721c12d7dd4 100644
---
a/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/add_drop_partition_by_hdfs.groovy
@@ -19,6 +19,9 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
import java.time.LocalDate;
suite("add_drop_partition_by_hdfs") {
+ if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+ }
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -48,7 +51,8 @@ suite("add_drop_partition_by_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "tbl1"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "tbl1${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def check_storage_policy_exist = { name->
@@ -63,8 +67,8 @@ suite("add_drop_partition_by_hdfs") {
return false;
}
- def resource_name = "test_add_drop_partition_resource"
- def policy_name= "test_add_drop_partition_policy"
+ def resource_name = "test_add_drop_partition_resource${suffix}"
+ def policy_name= "test_add_drop_partition_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -87,12 +91,7 @@ suite("add_drop_partition_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -157,7 +156,8 @@ suite("add_drop_partition_by_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -165,6 +165,7 @@ suite("add_drop_partition_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
@@ -191,34 +192,22 @@ suite("add_drop_partition_by_hdfs") {
assertTrue(par[12] == "${policy_name}")
}
- try_sql """
- drop storage policy add_policy;
- """
+ def add_resource = "add_resource${suffix}"
try_sql """
- drop resource add_resource;
+ drop resource ${add_resource};
"""
sql """
- CREATE RESOURCE IF NOT EXISTS "add_resource"
+ CREATE RESOURCE IF NOT EXISTS "${add_resource}"
PROPERTIES(
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
- try_sql """
- create storage policy tmp_policy
- PROPERTIES( "storage_resource" = "add_resource", "cooldown_ttl" = "300");
- """
-
// can not set to one policy with different resource
try {
sql """alter table ${tableName} set ("storage_policy" =
"add_policy");"""
@@ -226,22 +215,23 @@ suite("add_drop_partition_by_hdfs") {
assertTrue(true)
}
+ def add_policy1 = "add_policy1${suffix}"
sql """
- CREATE STORAGE POLICY IF NOT EXISTS add_policy1
+ CREATE STORAGE POLICY IF NOT EXISTS ${add_policy1}
PROPERTIES(
"storage_resource" = "${resource_name}",
"cooldown_ttl" = "60"
)
"""
- sql """alter table ${tableName} set ("storage_policy" = "add_policy1");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${add_policy1}");"""
// wait for report
sleep(300000)
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "add_policy1")
+ assertTrue(par[12] == "${add_policy1}")
}
@@ -251,12 +241,12 @@ suite("add_drop_partition_by_hdfs") {
"""
sql """
- insert into ${tableName} values(1, "2017-01-01");
+ insert into ${tableName} values(1, "2016-01-01");
"""
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "add_policy1")
+ assertTrue(par[12] == "${add_policy1}")
}
sql """
@@ -267,16 +257,12 @@ suite("add_drop_partition_by_hdfs") {
DROP TABLE ${tableName}
"""
- try_sql """
- drop storage policy add_policy;
- """
-
sql """
- drop storage policy add_policy1;
+ drop storage policy ${add_policy1};
"""
sql """
- drop resource add_resource;
+ drop resource ${add_resource};
"""
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
index 580946bd7a8..bcd17146b3d 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition.groovy
@@ -48,7 +48,8 @@ suite("cold_heat_dynamic_partition") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "tbl2"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "tbl2${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def check_storage_policy_exist = { name->
@@ -63,8 +64,8 @@ suite("cold_heat_dynamic_partition") {
return false;
}
- def resource_name = "test_dynamic_partition_resource"
- def policy_name= "test_dynamic_partition_policy"
+ def resource_name = "test_dynamic_partition_resource${suffix}"
+ def policy_name= "test_dynamic_partition_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -169,7 +170,8 @@ suite("cold_heat_dynamic_partition") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -177,6 +179,7 @@ suite("cold_heat_dynamic_partition") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
@@ -203,16 +206,18 @@ suite("cold_heat_dynamic_partition") {
assertTrue(par[12] == "${policy_name}")
}
+ def tmp_policy = "tmp_policy${suffix}"
try_sql """
- drop storage policy tmp_policy;
+ drop storage policy ${tmp_policy};
"""
+ def tmp_resource = "tmp_resource${suffix}"
try_sql """
- drop resource tmp_resource;
+ drop resource ${tmp_resource};
"""
sql """
- CREATE RESOURCE IF NOT EXISTS "tmp_resource"
+ CREATE RESOURCE IF NOT EXISTS "${tmp_resource}"
PROPERTIES(
"type"="s3",
"AWS_ENDPOINT" = "${getS3Endpoint()}",
@@ -229,33 +234,34 @@ suite("cold_heat_dynamic_partition") {
"""
try_sql """
- create storage policy tmp_policy
- PROPERTIES( "storage_resource" = "tmp_resource", "cooldown_ttl" = "300");
+ create storage policy ${tmp_policy}
+ PROPERTIES( "storage_resource" = "${tmp_resource}", "cooldown_ttl" =
"300");
"""
// can not set to one policy with different resource
try {
- sql """alter table ${tableName} set ("storage_policy" =
"tmp_policy");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${tmp_policy}");"""
} catch (java.sql.SQLException t) {
assertTrue(true)
}
+ def tmp_policy1 = "tmp_policy1${suffix}"
sql """
- CREATE STORAGE POLICY IF NOT EXISTS tmp_policy1
+ CREATE STORAGE POLICY IF NOT EXISTS ${tmp_policy1}
PROPERTIES(
"storage_resource" = "${resource_name}",
"cooldown_ttl" = "60"
)
"""
- sql """alter table ${tableName} set ("storage_policy" = "tmp_policy1");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${tmp_policy1}");"""
// wait for report
sleep(300000)
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "tmp_policy1")
+ assertTrue(par[12] == "${tmp_policy1}")
}
sql """
@@ -267,15 +273,15 @@ suite("cold_heat_dynamic_partition") {
"""
sql """
- drop storage policy tmp_policy;
+ drop storage policy ${tmp_policy};
"""
sql """
- drop storage policy tmp_policy1;
+ drop storage policy ${tmp_policy1};
"""
sql """
- drop resource tmp_resource;
+ drop resource ${tmp_resource};
"""
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
index d099e43d7be..0e64132ba51 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_dynamic_partition_by_hdfs.groovy
@@ -19,6 +19,9 @@ import org.codehaus.groovy.runtime.IOGroovyMethods
import java.time.LocalDate;
suite("cold_heat_dynamic_partition_by_hdfs") {
+ if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+ }
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -48,7 +51,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "tbl2"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "tbl2${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def check_storage_policy_exist = { name->
@@ -63,8 +67,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
return false;
}
- def resource_name = "test_dynamic_partition_resource"
- def policy_name= "test_dynamic_partition_policy"
+ def resource_name = "test_dynamic_partition_resource${suffix}"
+ def policy_name= "test_dynamic_partition_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -87,12 +91,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -167,7 +166,8 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -175,6 +175,7 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
@@ -201,57 +202,55 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
assertTrue(par[12] == "${policy_name}")
}
+ def tmp_policy = "tmp_policy${suffix}"
try_sql """
- drop storage policy tmp_policy;
+ drop storage policy ${tmp_policy};
"""
+ def tmp_resource = "tmp_resource${suffix}"
try_sql """
- drop resource tmp_resource;
+ drop resource ${tmp_resource};
"""
sql """
- CREATE RESOURCE IF NOT EXISTS "tmp_resource"
+ CREATE RESOURCE IF NOT EXISTS "${tmp_resource}"
PROPERTIES(
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
try_sql """
- create storage policy tmp_policy
- PROPERTIES( "storage_resource" = "tmp_resource", "cooldown_ttl" = "300");
+ create storage policy ${tmp_policy}
+ PROPERTIES( "storage_resource" = "${tmp_resource}", "cooldown_ttl" =
"300");
"""
// can not set to one policy with different resource
try {
- sql """alter table ${tableName} set ("storage_policy" =
"tmp_policy");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${tmp_policy}");"""
} catch (java.sql.SQLException t) {
assertTrue(true)
}
+ def tmp_policy1 = "tmp_policy1${suffix}"
sql """
- CREATE STORAGE POLICY IF NOT EXISTS tmp_policy1
+ CREATE STORAGE POLICY IF NOT EXISTS ${tmp_policy1}
PROPERTIES(
"storage_resource" = "${resource_name}",
"cooldown_ttl" = "60"
)
"""
- sql """alter table ${tableName} set ("storage_policy" = "tmp_policy1");"""
+ sql """alter table ${tableName} set ("storage_policy" =
"${tmp_policy1}");"""
// wait for report
sleep(300000)
partitions = sql "show partitions from ${tableName}"
for (par in partitions) {
- assertTrue(par[12] == "tmp_policy1")
+ assertTrue(par[12] == "${tmp_policy1}")
}
sql """
@@ -263,15 +262,15 @@ suite("cold_heat_dynamic_partition_by_hdfs") {
"""
sql """
- drop storage policy tmp_policy;
+ drop storage policy ${tmp_policy};
"""
sql """
- drop storage policy tmp_policy1;
+ drop storage policy ${tmp_policy1};
"""
sql """
- drop resource tmp_resource;
+ drop resource ${tmp_resource};
"""
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
index 97d83ec64e0..25eaa5e7a30 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy.groovy
@@ -47,7 +47,8 @@ suite("create_table_use_partition_policy") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem1"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem1${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -90,13 +91,13 @@ suite("create_table_use_partition_policy") {
def load_lineitem_table = {
stream_load_one_part("00")
stream_load_one_part("01")
- def tablets = sql """
+ def tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
"""
- while (tablets[0][8] == "0") {
+ while (tablets[0].LocalDataSize == "0") {
log.info( "test local size is zero, sleep 10s")
sleep(10000)
- tablets = sql """
+ tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
"""
}
@@ -114,8 +115,8 @@ suite("create_table_use_partition_policy") {
return false;
}
- def resource_name = "test_table_partition_with_data_resource"
- def policy_name= "test_table_partition_with_data_policy"
+ def resource_name = "test_table_partition_with_data_resource${suffix}"
+ def policy_name= "test_table_partition_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -217,7 +218,8 @@ suite("create_table_use_partition_policy") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -225,6 +227,7 @@ suite("create_table_use_partition_policy") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
index 5c03aeba0c1..ce4264480a8 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_partition_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("create_table_use_partition_policy_by_hdfs") {
+ if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+ }
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("create_table_use_partition_policy_by_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem1"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem1${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -90,13 +94,13 @@ suite("create_table_use_partition_policy_by_hdfs") {
def load_lineitem_table = {
stream_load_one_part("00")
stream_load_one_part("01")
- def tablets = sql """
+ def tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
"""
while (tablets[0].LocalDataSize == "0") {
log.info( "test local size is zero, sleep 10s")
sleep(10000)
- tablets = sql """
+ tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
"""
}
@@ -114,8 +118,8 @@ suite("create_table_use_partition_policy_by_hdfs") {
return false;
}
- def resource_name = "test_table_partition_with_data_resource"
- def policy_name= "test_table_partition_with_data_policy"
+ def resource_name = "test_table_partition_with_data_resource${suffix}"
+ def policy_name= "test_table_partition_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -138,12 +142,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -318,8 +317,9 @@ suite("create_table_use_partition_policy_by_hdfs") {
SHOW TABLETS FROM ${tableName} PARTITIONS(p202301)
"""
log.info( "test tablets not empty")
+ def retry = 100
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -327,6 +327,7 @@ suite("create_table_use_partition_policy_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
index 4073a5c67b8..f7b024795f5 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy.groovy
@@ -47,7 +47,8 @@ suite("create_table_use_policy") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem2"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem2${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -114,8 +115,8 @@ suite("create_table_use_policy") {
return false;
}
- def resource_name = "test_table_with_data_resource"
- def policy_name= "test_table_with_data_policy"
+ def resource_name = "test_table_with_data_resource${suffix}"
+ def policy_name= "test_table_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -189,7 +190,7 @@ suite("create_table_use_policy") {
load_lineitem_table()
// show tablets from table, 获取第一个tablet的 LocalDataSize1
- tablets = sql_return_maparray """
+ def tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
log.info( "test tablets not empty")
@@ -206,7 +207,8 @@ suite("create_table_use_policy") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -214,6 +216,7 @@ suite("create_table_use_policy") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
log.info( "test remote size not zero")
assertEquals(LocalDataSize1, sizes[1])
@@ -272,7 +275,8 @@ suite("create_table_use_policy") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -280,6 +284,7 @@ suite("create_table_use_policy") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
log.info( "test remote size not zero")
assertEquals(LocalDataSize1, sizes[1])
diff --git
a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
index 9ff61a35e04..408f2e6275b 100644
---
a/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/create_table_use_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("create_table_use_policy_by_hdfs") {
+ if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+ }
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("create_table_use_policy_by_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem2"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem2${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -114,8 +118,8 @@ suite("create_table_use_policy_by_hdfs") {
return false;
}
- def resource_name = "test_table_with_data_resource"
- def policy_name= "test_table_with_data_policy"
+ def resource_name = "test_table_with_data_resource${suffix}"
+ def policy_name= "test_table_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -138,12 +142,7 @@ suite("create_table_use_policy_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -187,7 +186,7 @@ suite("create_table_use_policy_by_hdfs") {
load_lineitem_table()
// show tablets from table, 获取第一个tablet的 LocalDataSize1
- tablets = sql_return_maparray """
+ def tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
log.info( "test tablets not empty")
@@ -204,7 +203,8 @@ suite("create_table_use_policy_by_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -212,6 +212,7 @@ suite("create_table_use_policy_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
log.info( "test remote size not zero")
assertEquals(LocalDataSize1, sizes[1])
@@ -270,7 +271,8 @@ suite("create_table_use_policy_by_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -278,6 +280,7 @@ suite("create_table_use_policy_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error
occurred")
assertTrue(tablets.size() > 0)
log.info( "test remote size not zero")
assertEquals(LocalDataSize1, sizes[1])
diff --git
a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
index 673fa9d39c8..8aa2ded305a 100644
---
a/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/load_colddata_to_hdfs.groovy
@@ -51,7 +51,8 @@ suite("load_colddata_to_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem2"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem2${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -117,8 +118,8 @@ suite("load_colddata_to_hdfs") {
return false;
}
- def resource_name = "test_table_with_data_resource"
- def policy_name= "test_table_with_data_policy"
+ def resource_name = "test_table_with_data_resource${suffix}"
+ def policy_name= "test_table_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -141,13 +142,8 @@ suite("load_colddata_to_hdfs") {
PROPERTIES (
"type"="hdfs",
"fs.defaultFS"="${hdfsFs}",
- "hadoop.username"="hive",
- "hadoop.password"="hive",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.username"="${getHdfsUser()}",
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -191,7 +187,7 @@ suite("load_colddata_to_hdfs") {
load_lineitem_table()
// show tablets from table, 获取第一个tablet的 LocalDataSize1
- tablets = sql_return_maparray """
+ def tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
log.info( "test tablets not empty")
@@ -208,7 +204,8 @@ suite("load_colddata_to_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -216,6 +213,7 @@ suite("load_colddata_to_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
assertTrue(tablets.size() > 0)
log.info( "test remote size not zero")
assertEquals(LocalDataSize1, sizes[1])
diff --git
a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
index 744fef15037..b66ced914c6 100644
---
a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition.groovy
@@ -18,6 +18,10 @@ import groovy.json.JsonSlurper
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("modify_replica_use_partition") {
+
+def replicaNum = getFeConfig("force_olap_table_replication_num")
+setFeConfig("force_olap_table_replication_num", 0)
+try {
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -61,8 +65,8 @@ suite("modify_replica_use_partition") {
assertEquals(code, 0)
return out
}
-
- def tableName = "lineitem3"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem3${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -129,8 +133,8 @@ suite("modify_replica_use_partition") {
return false;
}
- def resource_name = "test_table_replica_with_data_resource"
- def policy_name= "test_table_replica_with_data_policy"
+ def resource_name = "test_table_replica_with_data_resource${suffix}"
+ def policy_name= "test_table_replica_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -209,6 +213,7 @@ suite("modify_replica_use_partition") {
load_lineitem_table()
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -217,7 +222,8 @@ suite("modify_replica_use_partition") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -225,6 +231,7 @@ suite("modify_replica_use_partition") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
assertTrue(tablets.size() > 0)
def LocalDataSize1 = sizes[0]
def RemoteDataSize1 = sizes[1]
@@ -233,12 +240,13 @@ suite("modify_replica_use_partition") {
log.info( "test remote size not zero")
assertTrue(RemoteDataSize1 != 0)
def originSize = tablets.size()
+ assertEquals(originSize, 6, "${tableName}'s tablets should be 6")
// alter change replication num
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301, p202302) SET("replication_num"="3");
+ MODIFY PARTITION (p202301, p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
"""
}
@@ -250,19 +258,22 @@ suite("modify_replica_use_partition") {
select * from ${tableName} limit 10
"""
// wait one minute for migration to be completed
+ log.info("wait one minute for migration to be completed")
sleep(60000)
// 对比所有tablets的replicas的rowsets meta是否相同
tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
- while (tablets.size() != 3 * originSize) {
- log.info( "tablets clone not finished, sleep 10s")
+ retry = 100
+ while (tablets.size() != 3 * originSize && retry --> 0) {
+ log.info( "tablets clone not finished(tablets.size =
${tablets.size()}, originSize = ${originSize}), sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
}
+ assertTrue(tablets.size() == 3 * originSize, "tablets clone not finished, maybe some error occurred")
def compactionStatusIdx = tablets[0].size() - 1
// check rowsets inside the 3 replica
def iterate_num = tablets.size() / 3;
@@ -336,6 +347,7 @@ suite("modify_replica_use_partition") {
assertEquals(RemoteDataSize1, 0)
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -345,7 +357,8 @@ suite("modify_replica_use_partition") {
log.info( "test tablets not empty")
assertTrue(tablets.size() > 0)
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -353,6 +366,7 @@ suite("modify_replica_use_partition") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
log.info( "test local size is zero")
@@ -364,7 +378,7 @@ suite("modify_replica_use_partition") {
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301, p202302) SET("replication_num"="1");
+ MODIFY PARTITION (p202301, p202302) SET("replication_num"="1", "storage_policy" = "${policy_name}");
"""
}
@@ -431,6 +445,7 @@ suite("modify_replica_use_partition") {
assertEquals(RemoteDataSize1, 0)
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -440,7 +455,8 @@ suite("modify_replica_use_partition") {
log.info( "test tablets not empty")
assertTrue(tablets.size() > 0)
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -448,6 +464,7 @@ suite("modify_replica_use_partition") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
log.info( "test local size is zero")
@@ -459,12 +476,12 @@ suite("modify_replica_use_partition") {
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301) SET("replication_num"="1");
+ MODIFY PARTITION (p202301) SET("replication_num"="1", "storage_policy" = "${policy_name}");
"""
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202302) SET("replication_num"="3");
+ MODIFY PARTITION (p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
"""
}
@@ -476,6 +493,7 @@ suite("modify_replica_use_partition") {
select * from ${tableName} limit 10
"""
+ log.info("wait one minute for migration to be completed")
// wait one minute for migration to be completed
sleep(60000)
// 对比3副本的partition中所有tablets的replicas的rowsets meta是否相同
@@ -509,6 +527,8 @@ suite("modify_replica_use_partition") {
sql """
DROP TABLE ${tableName}
"""
-
+} finally {
+ setFeConfig("force_olap_table_replication_num", replicaNum)
+}
}
diff --git
a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
index 9c9a9c0e1fa..f085a640907 100644
---
a/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/modify_replica_use_partition_by_hdfs.groovy
@@ -18,6 +18,13 @@ import groovy.json.JsonSlurper
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("modify_replica_use_partition_by_hdfs") {
+if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+}
+def replicaNum = getFeConfig("force_olap_table_replication_num")
+setFeConfig("force_olap_table_replication_num", 0)
+try {
+
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -62,7 +69,8 @@ suite("modify_replica_use_partition_by_hdfs") {
return out
}
- def tableName = "lineitem3"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem3${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -129,8 +137,8 @@ suite("modify_replica_use_partition_by_hdfs") {
return false;
}
- def resource_name = "test_table_replica_with_data_resource"
- def policy_name= "test_table_replica_with_data_policy"
+ def resource_name = "test_table_replica_with_data_resource${suffix}"
+ def policy_name= "test_table_replica_with_data_policy${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -153,12 +161,7 @@ suite("modify_replica_use_partition_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -207,6 +210,7 @@ suite("modify_replica_use_partition_by_hdfs") {
load_lineitem_table()
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -215,7 +219,8 @@ suite("modify_replica_use_partition_by_hdfs") {
"""
log.info( "test tablets not empty")
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ def retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -223,6 +228,7 @@ suite("modify_replica_use_partition_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
assertTrue(tablets.size() > 0)
def LocalDataSize1 = sizes[0]
def RemoteDataSize1 = sizes[1]
@@ -236,7 +242,7 @@ suite("modify_replica_use_partition_by_hdfs") {
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301, p202302) SET("replication_num"="3");
+ MODIFY PARTITION (p202301, p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
"""
}
@@ -248,19 +254,22 @@ suite("modify_replica_use_partition_by_hdfs") {
select * from ${tableName} limit 10
"""
// wait one minute for migration to be completed
- sleep(60000)
+ // sleep(60000)
+ log.info("wait one minute for migration to be completed")
// 对比所有tablets的replicas的rowsets meta是否相同
tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
- while (tablets.size() != 3 * originSize) {
- log.info( "tablets clone not finished, sleep 10s")
+ retry = 100
+ while (tablets.size() != 3 * originSize && retry --> 0) {
+ log.info( "tablets clone not finished(tablets.size =
${tablets.size()}, originSize = ${originSize}), sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
}
+ assertTrue(tablets.size() == 3 * originSize, "tablets clone not finished, maybe some error occurred")
def compactionStatusIdx = tablets[0].size() - 1
// check rowsets inside the 3 replica
def iterate_num = tablets.size() / 3;
@@ -334,6 +343,7 @@ suite("modify_replica_use_partition_by_hdfs") {
assertEquals(RemoteDataSize1, 0)
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -343,7 +353,8 @@ suite("modify_replica_use_partition_by_hdfs") {
log.info( "test tablets not empty")
assertTrue(tablets.size() > 0)
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -351,6 +362,7 @@ suite("modify_replica_use_partition_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
log.info( "test local size is zero")
@@ -362,7 +374,7 @@ suite("modify_replica_use_partition_by_hdfs") {
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301, p202302) SET("replication_num"="1");
+ MODIFY PARTITION (p202301, p202302) SET("replication_num"="1", "storage_policy" = "${policy_name}");
"""
}
@@ -429,6 +441,7 @@ suite("modify_replica_use_partition_by_hdfs") {
assertEquals(RemoteDataSize1, 0)
// 等待10min,show tablets from table, 预期not_use_storage_policy_tablet_list 的
RemoteDataSize 为LocalDataSize1,LocalDataSize为0
+ log.info("wait for 10min")
sleep(600000)
@@ -438,7 +451,8 @@ suite("modify_replica_use_partition_by_hdfs") {
log.info( "test tablets not empty")
assertTrue(tablets.size() > 0)
fetchDataSize(sizes, tablets[0])
- while (sizes[1] == 0) {
+ retry = 100
+ while (sizes[1] == 0 && retry --> 0) {
log.info( "test remote size is zero, sleep 10s")
sleep(10000)
tablets = sql_return_maparray """
@@ -446,6 +460,7 @@ suite("modify_replica_use_partition_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
}
+ assertTrue(sizes[1] != 0, "remote size is still zero, maybe some error occurred")
LocalDataSize1 = sizes[0]
RemoteDataSize1 = sizes[1]
log.info( "test local size is zero")
@@ -457,12 +472,12 @@ suite("modify_replica_use_partition_by_hdfs") {
if (!isCloudMode()) {
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202301) SET("replication_num"="1");
+ MODIFY PARTITION (p202301) SET("replication_num"="1", "storage_policy" = "${policy_name}");
"""
sql """
ALTER TABLE ${tableName}
- MODIFY PARTITION (p202302) SET("replication_num"="3");
+ MODIFY PARTITION (p202302) SET("replication_num"="3", "storage_policy" = "${policy_name}");
"""
}
@@ -475,6 +490,7 @@ suite("modify_replica_use_partition_by_hdfs") {
"""
// wait one minute for migration to be completed
+ log.info("wait one minute for migration to be completed")
sleep(60000)
// 对比3副本的partition中所有tablets的replicas的rowsets meta是否相同
tablets = sql_return_maparray """
@@ -508,5 +524,8 @@ suite("modify_replica_use_partition_by_hdfs") {
DROP TABLE ${tableName}
"""
+} finally {
+ setFeConfig("force_olap_table_replication_num", replicaNum)
+}
}
diff --git
a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
index c23a9eea6df..ec8fcc8f255 100644
---
a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy.groovy
@@ -47,7 +47,8 @@ suite("table_modify_resouce") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem4"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem4${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -114,8 +115,8 @@ suite("table_modify_resouce") {
return false;
}
- def resource_name = "test_table_with_data_resource_modify_1"
- def policy_name= "test_table_with_data_policy_modify_1"
+ def resource_name = "test_table_with_data_resource_modify_1${suffix}"
+ def policy_name= "test_table_with_data_policy_modify_1${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -206,7 +207,7 @@ suite("table_modify_resouce") {
"""
fetchDataSize(sizes, tablets[0])
try_times -= 1
- assertTrue(try_times > 0)
+ assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
}
// 修改resource和policy到新值然后查看remote data size是否能对上
@@ -227,7 +228,7 @@ suite("table_modify_resouce") {
"""
- def tablets2 = sql """
+ def tablets2 = sql_return_maparray """
SHOW TABLETS FROM ${tableName}
"""
// [8] local data size, [9] remote data size
@@ -289,7 +290,7 @@ suite("table_modify_resouce") {
"""
fetchDataSize(sizes, tablets[0])
try_times -= 1
- assertTrue(try_times > 0)
+ assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
}
// 修改resource和policy到新值然后查看remote data size是否能对上
@@ -317,7 +318,7 @@ suite("table_modify_resouce") {
log.info( "test all remote size not zero")
for (int i = 0; i < tablets2.size(); i++) {
fetchDataSize(sizes, tablets2[i])
- assertTrue(sizes[1] > 0)
+ assertTrue(sizes[1] > 0, tablets2[i].TabletId + " remote size is " + sizes[1] + ", no greater than 0, MetaUrl is " + tablets2[i].MetaUrl)
}
diff --git
a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
index a2c6f32cd63..601d42ab0d2 100644
---
a/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/table_modify_resouce_and_policy_by_hdfs.groovy
@@ -18,6 +18,9 @@ import groovy.json.JsonSlurper
import org.codehaus.groovy.runtime.IOGroovyMethods
suite("table_modify_resouce_by_hdfs") {
+ if (!enableHdfs()) {
+ logger.info("skip this case because hdfs is not enabled");
+ }
def fetchBeHttp = { check_func, meta_url ->
def i = meta_url.indexOf("/api")
String endPoint = meta_url.substring(0, i)
@@ -47,7 +50,8 @@ suite("table_modify_resouce_by_hdfs") {
}
// used as passing out parameter to fetchDataSize
List<Long> sizes = [-1, -1]
- def tableName = "lineitem4"
+ def suffix = UUID.randomUUID().hashCode().abs()
+ def tableName = "lineitem4${suffix}"
sql """ DROP TABLE IF EXISTS ${tableName} """
def stream_load_one_part = { partnum ->
streamLoad {
@@ -114,8 +118,8 @@ suite("table_modify_resouce_by_hdfs") {
return false;
}
- def resource_name = "test_table_with_data_resource_modify_1"
- def policy_name= "test_table_with_data_policy_modify_1"
+ def resource_name = "test_table_with_data_resource_modify_1${suffix}"
+ def policy_name= "test_table_with_data_policy_modify_1${suffix}"
if (check_storage_policy_exist(policy_name)) {
sql """
@@ -138,12 +142,7 @@ suite("table_modify_resouce_by_hdfs") {
"type"="hdfs",
"fs.defaultFS"="${getHdfsFs()}",
"hadoop.username"="${getHdfsUser()}",
- "hadoop.password"="${getHdfsPasswd()}",
- "dfs.nameservices" = "my_ha",
- "dfs.ha.namenodes.my_ha" = "my_namenode1, my_namenode2",
- "dfs.namenode.rpc-address.my_ha.my_namenode1" = "127.0.0.1:10000",
- "dfs.namenode.rpc-address.my_ha.my_namenode2" = "127.0.0.1:10000",
- "dfs.client.failover.proxy.provider" =
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
+ "hadoop.password"="${getHdfsPasswd()}"
);
"""
@@ -204,7 +203,7 @@ suite("table_modify_resouce_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
try_times -= 1
- assertTrue(try_times > 0)
+ assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
}
// 修改resource和policy到新值然后查看remote data size是否能对上
@@ -287,7 +286,7 @@ suite("table_modify_resouce_by_hdfs") {
"""
fetchDataSize(sizes, tablets[0])
try_times -= 1
- assertTrue(try_times > 0)
+ assertTrue(try_times > 0, "remote size is still zero, maybe some error occurred")
}
// 修改resource和policy到新值然后查看remote data size是否能对上
@@ -315,7 +314,7 @@ suite("table_modify_resouce_by_hdfs") {
log.info( "test all remote size not zero")
for (int i = 0; i < tablets2.size(); i++) {
fetchDataSize(sizes, tablets2[i])
- assertTrue(sizes[1] > 0)
+ assertTrue(sizes[1] > 0, tablets2[i].TabletId + " remote size is " + sizes[1] + ", no greater than 0, MetaUrl is " + tablets2[i].MetaUrl)
}
diff --git
a/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
b/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
index 73835a0c2cc..a6277e663d7 100644
---
a/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
+++
b/regression-test/suites/cold_heat_separation_p2/test_show_storage_policy_using.groovy
@@ -120,7 +120,7 @@ suite("test_show_storage_policy_using") {
);
"""
- show_result = sql """
+ def show_result = sql """
show storage policy using for ${policy_name}
"""
assertEquals(show_result.size(), 2)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]