This is an automated email from the ASF dual-hosted git repository.
morningman pushed a commit to branch branch-2.0
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/branch-2.0 by this push:
new b366cada910 [bugfix](hive)use originHiveKeys for hive partitionvalue
for 2.0 (#33240)
b366cada910 is described below
commit b366cada910304084b9083c2afafa3cafc5c1468
Author: wuwenchi <[email protected]>
AuthorDate: Thu Apr 4 08:14:19 2024 +0800
[bugfix](hive)use originHiveKeys for hive partitionvalue for 2.0 (#33240)
mirror: #32664 #32768
---
.../org/apache/doris/catalog/PartitionKey.java | 10 ++
.../doris/planner/ListPartitionPrunerV2Test.java | 135 +++++++++++++++++++++
2 files changed, 145 insertions(+)
diff --git
a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
index bf69a209e9e..4e273de373c 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/catalog/PartitionKey.java
@@ -208,6 +208,11 @@ public class PartitionKey implements
Comparable<PartitionKey>, Writable {
}
public List<String> getPartitionValuesAsStringList() {
+ if (originHiveKeys.size() == keys.size()) {
+ // for hive, we need to use originHiveKeys,
+ // because when a double value such as 1.123 is used as a partition column, it will be saved
as '1.123000' for PartitionValue
+ return getPartitionValuesAsStringListForHive();
+ }
return keys.stream().map(k ->
k.getStringValue()).collect(Collectors.toList());
}
@@ -538,4 +543,9 @@ public class PartitionKey implements
Comparable<PartitionKey>, Writable {
}
return false;
}
+
+ // for test
+ public List<String> getOriginHiveKeys() {
+ return originHiveKeys;
+ }
}
diff --git
a/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
b/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
new file mode 100644
index 00000000000..e837af6839b
--- /dev/null
+++
b/fe/fe-core/src/test/java/org/apache/doris/planner/ListPartitionPrunerV2Test.java
@@ -0,0 +1,135 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.planner;
+
+import org.apache.doris.analysis.PartitionValue;
+import org.apache.doris.catalog.ListPartitionItem;
+import org.apache.doris.catalog.PartitionItem;
+import org.apache.doris.catalog.PartitionKey;
+import org.apache.doris.catalog.ScalarType;
+import org.apache.doris.catalog.Type;
+import org.apache.doris.common.AnalysisException;
+import org.apache.doris.common.ThreadPoolManager;
+import org.apache.doris.datasource.HMSExternalCatalog;
+import org.apache.doris.datasource.hive.HiveMetaStoreCache;
+import org.apache.doris.datasource.hive.PooledHiveMetaStoreClient;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import mockit.Mock;
+import mockit.MockUp;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ThreadPoolExecutor;
+
+public class ListPartitionPrunerV2Test {
+ @Test
+ public void testPartitionValuesMap() throws AnalysisException {
+ List<PartitionValue> partitionValues = new ArrayList<>();
+ partitionValues.add(new PartitionValue("1.123000"));
+ ArrayList<Type> types = new ArrayList<>();
+ types.add(ScalarType.DOUBLE);
+
+ // for hive table
+ PartitionKey key =
PartitionKey.createListPartitionKeyWithTypes(partitionValues, types, true);
+ ListPartitionItem listPartitionItem = new
ListPartitionItem(Lists.newArrayList(key));
+ Map<Long, PartitionItem> idToPartitionItem =
Maps.newHashMapWithExpectedSize(partitionValues.size());
+ idToPartitionItem.put(1L, listPartitionItem);
+
+ // for olap table
+ PartitionKey key2 =
PartitionKey.createListPartitionKeyWithTypes(partitionValues, types, false);
+ ListPartitionItem listPartitionItem2 = new
ListPartitionItem(Lists.newArrayList(key2));
+ idToPartitionItem.put(2L, listPartitionItem2);
+
+ Map<Long, List<String>> partitionValuesMap =
ListPartitionPrunerV2.getPartitionValuesMap(idToPartitionItem);
+ Assert.assertEquals("1.123000", partitionValuesMap.get(1L).get(0));
+ Assert.assertEquals("1.123", partitionValuesMap.get(2L).get(0));
+ }
+
+ @Test
+ public void testInvalidateTable() {
+
+ new MockUp<HMSExternalCatalog>(HMSExternalCatalog.class) {
+ @Mock
+ public PooledHiveMetaStoreClient getClient() {
+ return new PooledHiveMetaStoreClient(new HiveConf(), 2);
+ }
+ };
+
+ new MockUp<PooledHiveMetaStoreClient>(PooledHiveMetaStoreClient.class)
{
+ @Mock
+ public List<String> listPartitionNames(String dbName, String
tblName) {
+ // Mock is used here to represent the existence of a partition
in the original table
+ return new ArrayList<String>() {{
+ add("c1=1.234000");
+ }};
+ }
+ };
+
+ ThreadPoolExecutor executor =
ThreadPoolManager.newDaemonFixedThreadPool(
+ 20, 20, "mgr", 120, false);
+ HiveMetaStoreCache cache = new HiveMetaStoreCache(
+ new HMSExternalCatalog(1L, "catalog", null, new HashMap<>(),
null), executor);
+ ArrayList<Type> types = new ArrayList<>();
+ types.add(ScalarType.DOUBLE);
+
+ // test cache
+ // the original partition of the table (in mock) will be loaded here
+ String dbName = "db";
+ String tblName = "tb";
+ HiveMetaStoreCache.HivePartitionValues partitionValues =
cache.getPartitionValues(dbName, tblName, types);
+ Assert.assertEquals(1, partitionValues.getIdToPartitionItem().size());
+
Assert.assertTrue(partitionValues.getIdToPartitionItem().containsKey(0L));
+ List<PartitionKey> items =
partitionValues.getIdToPartitionItem().get(0L).getItems();
+ Assert.assertEquals(1, items.size());
+ PartitionKey partitionKey = items.get(0);
+ Assert.assertEquals("1.234", partitionKey.getKeys().get(0).toString());
+ Assert.assertEquals("1.234000",
partitionKey.getOriginHiveKeys().get(0));
+
+ // test add cache
+ ArrayList<String> values = new ArrayList<>();
+ values.add("c1=5.678000");
+ cache.addPartitionsCache(dbName, tblName, values, types);
+ HiveMetaStoreCache.HivePartitionValues partitionValues2 =
cache.getPartitionValues(dbName, tblName, types);
+ Assert.assertEquals(2, partitionValues2.getIdToPartitionItem().size());
+
Assert.assertTrue(partitionValues2.getIdToPartitionItem().containsKey(1L));
+ List<PartitionKey> items2 =
partitionValues2.getIdToPartitionItem().get(1L).getItems();
+ Assert.assertEquals(1, items2.size());
+ PartitionKey partitionKey2 = items2.get(0);
+ Assert.assertEquals("5.678",
partitionKey2.getKeys().get(0).toString());
+ Assert.assertEquals("5.678000",
partitionKey2.getOriginHiveKeys().get(0));
+
+ // test refresh table
+ // simulates the manually added partition being deleted, leaving
only the one original partition returned by the mock
+ cache.invalidateTableCache(dbName, tblName);
+ HiveMetaStoreCache.HivePartitionValues partitionValues3 =
cache.getPartitionValues(dbName, tblName, types);
+ Assert.assertEquals(1, partitionValues3.getIdToPartitionItem().size());
+
Assert.assertTrue(partitionValues3.getIdToPartitionItem().containsKey(0L));
+ List<PartitionKey> items3 =
partitionValues3.getIdToPartitionItem().get(0L).getItems();
+ Assert.assertEquals(1, items3.size());
+ PartitionKey partitionKey3 = items3.get(0);
+ Assert.assertEquals("1.234",
partitionKey3.getKeys().get(0).toString());
+ Assert.assertEquals("1.234000",
partitionKey3.getOriginHiveKeys().get(0));
+ }
+}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]