This is an automated email from the ASF dual-hosted git repository.
kirs pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new d18b5ba798a [feat](hdfs)Add HDFS HA Configuration Validation (#55675)
d18b5ba798a is described below
commit d18b5ba798a57050800dbc2a8a3b7228bdd76a6c
Author: Calvin Kirs <[email protected]>
AuthorDate: Mon Sep 8 14:08:10 2025 +0800
[feat](hdfs)Add HDFS HA Configuration Validation (#55675)
### What problem does this PR solve?
Previously, HDFS High Availability (HA) configuration checks were only
performed for the HMS catalog. This PR extends HA validation to all
components that interact with HDFS (see the illustrative sketch below),
including:
- All catalog types (HMS/Hive, Iceberg, Paimon, etc.)
- Table-valued functions (TVFs) accessing HDFS resources
- Any future features that depend on HDFS HA
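For illustration, here is a minimal sketch of the HA property set that the extended validation expects. The class name `HdfsHaConfigExample` and the host/port values are placeholders invented for this sketch; `HdfsPropertiesUtils.checkHaConfig` is the helper added in this change.

```java
import org.apache.doris.datasource.property.storage.HdfsPropertiesUtils;

import java.util.HashMap;
import java.util.Map;

// Hypothetical, self-contained example; all values are placeholders.
public class HdfsHaConfigExample {
    public static void main(String[] args) {
        Map<String, String> props = new HashMap<>();
        props.put("dfs.nameservices", "ns1");
        // Each nameservice must list at least two namenodes ...
        props.put("dfs.ha.namenodes.ns1", "nn1,nn2");
        // ... provide an RPC address for every namenode ...
        props.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
        props.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020");
        // ... and name a failover proxy provider.
        props.put("dfs.client.failover.proxy.provider.ns1",
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");

        // Passes silently; removing any entry above raises IllegalArgumentException.
        // A map without dfs.nameservices is treated as non-HA and is not checked.
        HdfsPropertiesUtils.checkHaConfig(props);
    }
}
```

Because the check now runs during HdfsProperties initialization, the same rules apply whether the properties come from a catalog definition or a TVF.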
---
.../apache/doris/datasource/CatalogProperty.java | 27 +++++++-
.../doris/datasource/hive/HMSExternalCatalog.java | 52 +-------------
.../datasource/iceberg/IcebergExternalCatalog.java | 14 ++--
.../datasource/paimon/PaimonExternalCatalog.java | 20 +-----
.../property/storage/HdfsProperties.java | 1 +
.../property/storage/HdfsPropertiesUtils.java | 80 +++++++++++++++++++++-
.../apache/doris/common/util/LocationPathTest.java | 20 +++++-
.../property/storage/HdfsPropertiesTest.java | 6 ++
.../property/storage/HdfsPropertiesUtilsTest.java | 70 +++++++++++++++++++
.../auth_call/test_assistant_command_auth.groovy | 4 +-
.../suites/auth_call/test_ddl_catalog_auth.groovy | 12 ++--
11 files changed, 223 insertions(+), 83 deletions(-)
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
index d2c4fb7acbd..80444f45657 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/CatalogProperty.java
@@ -21,9 +21,11 @@ import org.apache.doris.common.UserException;
import org.apache.doris.datasource.property.metastore.MetastoreProperties;
import org.apache.doris.datasource.property.storage.StorageProperties;
+import com.aliyun.odps.table.utils.Preconditions;
import com.google.common.collect.Maps;
import com.google.gson.annotations.SerializedName;
import org.apache.commons.collections.MapUtils;
+import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
@@ -127,7 +129,8 @@ public class CatalogProperty {
.collect(Collectors.toMap(StorageProperties::getType, Function.identity()));
} catch (UserException e) {
LOG.warn("Failed to initialize catalog storage
properties", e);
- throw new RuntimeException("Failed to initialize
storage properties for catalog", e);
+ throw new RuntimeException("Failed to initialize
storage properties, error: "
+ + ExceptionUtils.getRootCauseMessage(e), e);
}
}
}
@@ -135,6 +138,25 @@ public class CatalogProperty {
return storagePropertiesMap;
}
+ public void checkMetaStoreAndStorageProperties(Class msClass) {
+ MetastoreProperties msProperties;
+ List<StorageProperties> storageProperties;
+ try {
+ msProperties = MetastoreProperties.create(getProperties());
+ storageProperties = StorageProperties.createAll(getProperties());
+ } catch (UserException e) {
+ throw new RuntimeException("Failed to initialize Catalog
properties, error: "
+ + ExceptionUtils.getRootCauseMessage(e), e);
+ }
+ Preconditions.checkNotNull(storageProperties,
+ "Storage properties are not configured properly");
+ Preconditions.checkNotNull(msProperties, "Metastore properties are not configured properly");
+ Preconditions.checkArgument(
+ msClass.isInstance(msProperties),
+ String.format("Metastore properties type is not correct.
Expected %s but got %s",
+ msClass.getName(), msProperties.getClass().getName()));
+ }
+
/**
* Get metastore properties with lazy loading, using double-check locking to ensure thread safety
*/
@@ -150,7 +172,8 @@ public class CatalogProperty {
metastoreProperties = MetastoreProperties.create(getProperties());
} catch (UserException e) {
LOG.warn("Failed to create metastore properties", e);
- throw new RuntimeException("Failed to create metastore
properties", e);
+ throw new RuntimeException("Failed to create metastore
properties, error: "
+ + ExceptionUtils.getRootCauseMessage(e), e);
}
}
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java
index 527abdb2072..3a9c90568f7 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/hive/HMSExternalCatalog.java
@@ -18,11 +18,9 @@
package org.apache.doris.datasource.hive;
import org.apache.doris.catalog.Env;
-import org.apache.doris.catalog.HdfsResource;
import org.apache.doris.cluster.ClusterNamespace;
import org.apache.doris.common.DdlException;
import org.apache.doris.common.ThreadPoolManager;
-import org.apache.doris.common.UserException;
import org.apache.doris.common.util.Util;
import org.apache.doris.datasource.CatalogProperty;
import org.apache.doris.datasource.ExternalCatalog;
@@ -34,14 +32,12 @@ import org.apache.doris.datasource.iceberg.IcebergMetadataOps;
import org.apache.doris.datasource.iceberg.IcebergUtils;
import org.apache.doris.datasource.operations.ExternalMetadataOperations;
import org.apache.doris.datasource.property.metastore.AbstractHMSProperties;
-import org.apache.doris.datasource.property.metastore.MetastoreProperties;
import org.apache.doris.fs.FileSystemProvider;
import org.apache.doris.fs.FileSystemProviderImpl;
import org.apache.doris.fs.remote.dfs.DFSFileSystem;
import org.apache.doris.transaction.TransactionManagerFactory;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
import org.apache.commons.lang3.math.NumberUtils;
import org.apache.iceberg.hive.HiveCatalog;
import org.apache.logging.log4j.LogManager;
@@ -94,14 +90,6 @@ public class HMSExternalCatalog extends ExternalCatalog {
return hmsProperties;
}
- private void initAndCheckHmsProperties() {
- try {
- this.hmsProperties = (AbstractHMSProperties) MetastoreProperties.create(catalogProperty.getProperties());
- } catch (UserException e) {
- throw new RuntimeException("Failed to create HMSProperties from
catalog properties", e);
- }
- }
-
@VisibleForTesting
public HMSExternalCatalog() {
catalogProperty = new CatalogProperty(null, null);
@@ -134,41 +122,7 @@ public class HMSExternalCatalog extends ExternalCatalog {
throw new DdlException(
"The parameter " + PARTITION_CACHE_TTL_SECOND + " is
wrong, value is " + partitionCacheTtlSecond);
}
-
- // check the dfs.ha properties
- // 'dfs.nameservices'='your-nameservice',
- // 'dfs.ha.namenodes.your-nameservice'='nn1,nn2',
- // 'dfs.namenode.rpc-address.your-nameservice.nn1'='172.21.0.2:4007',
- // 'dfs.namenode.rpc-address.your-nameservice.nn2'='172.21.0.3:4007',
- // 'dfs.client.failover.proxy.provider.your-nameservice'='xxx'
- String dfsNameservices = catalogProperty.getOrDefault(HdfsResource.DSF_NAMESERVICES, "");
- if (Strings.isNullOrEmpty(dfsNameservices)) {
- return;
- }
-
- String[] nameservices = dfsNameservices.split(",");
- for (String dfsservice : nameservices) {
- String namenodes = catalogProperty.getOrDefault("dfs.ha.namenodes." + dfsservice, "");
- if (Strings.isNullOrEmpty(namenodes)) {
- throw new DdlException("Missing dfs.ha.namenodes." +
dfsservice + " property");
- }
- String[] names = namenodes.split(",");
- for (String name : names) {
- String address = catalogProperty.getOrDefault("dfs.namenode.rpc-address." + dfsservice + "." + name,
- "");
- if (Strings.isNullOrEmpty(address)) {
- throw new DdlException(
- "Missing dfs.namenode.rpc-address." + dfsservice +
"." + name + " property");
- }
- }
- String failoverProvider = catalogProperty.getOrDefault("dfs.client.failover.proxy.provider." + dfsservice,
- "");
- if (Strings.isNullOrEmpty(failoverProvider)) {
- throw new DdlException(
- "Missing dfs.client.failover.proxy.provider." +
dfsservice + " property");
- }
- }
- initAndCheckHmsProperties();
+ catalogProperty.checkMetaStoreAndStorageProperties(AbstractHMSProperties.class);
}
@Override
@@ -180,7 +134,7 @@ public class HMSExternalCatalog extends ExternalCatalog {
@Override
protected void initLocalObjectsImpl() {
- initAndCheckHmsProperties();
+ this.hmsProperties = (AbstractHMSProperties) catalogProperty.getMetastoreProperties();
initPreExecutionAuthenticator();
HiveMetadataOps hiveOps = ExternalMetadataOperations.newHiveMetadataOps(hmsProperties.getHiveConf(), this);
threadPoolWithPreAuth = ThreadPoolManager.newDaemonFixedThreadPoolWithPreAuth(
@@ -190,7 +144,7 @@ public class HMSExternalCatalog extends ExternalCatalog {
true,
executionAuthenticator);
FileSystemProvider fileSystemProvider = new FileSystemProviderImpl(Env.getCurrentEnv().getExtMetaCacheMgr(),
- this.catalogProperty.getStoragePropertiesMap());
+ this.catalogProperty.getStoragePropertiesMap());
this.fileSystemExecutor = ThreadPoolManager.newDaemonFixedThreadPool(FILE_SYSTEM_EXECUTOR_THREAD_NUM, Integer.MAX_VALUE,
String.format("hms_committer_%s_file_system_executor_pool", name), true);
transactionManager = TransactionManagerFactory.createHiveTransactionManager(hiveOps, fileSystemProvider,
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
index b1178363b9b..f1a655456e0 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/iceberg/IcebergExternalCatalog.java
@@ -17,14 +17,13 @@
package org.apache.doris.datasource.iceberg;
+import org.apache.doris.common.DdlException;
import org.apache.doris.common.ThreadPoolManager;
-import org.apache.doris.common.UserException;
import org.apache.doris.datasource.ExternalCatalog;
import org.apache.doris.datasource.InitCatalogLog;
import org.apache.doris.datasource.SessionContext;
import org.apache.doris.datasource.operations.ExternalMetadataOperations;
import org.apache.doris.datasource.property.metastore.AbstractIcebergProperties;
-import org.apache.doris.datasource.property.metastore.MetastoreProperties;
import org.apache.doris.transaction.TransactionManagerFactory;
import org.apache.iceberg.catalog.Catalog;
@@ -54,14 +53,11 @@ public abstract class IcebergExternalCatalog extends ExternalCatalog {
// Create catalog based on catalog type
protected void initCatalog() {
try {
- msProperties = (AbstractIcebergProperties) MetastoreProperties
- .create(getProperties());
+ msProperties = (AbstractIcebergProperties) catalogProperty.getMetastoreProperties();
this.catalog = msProperties.initializeCatalog(getName(), new ArrayList<>(catalogProperty
.getStoragePropertiesMap().values()));
this.icebergCatalogType = msProperties.getIcebergCatalogType();
- } catch (UserException e) {
- throw new RuntimeException("Failed to initialize Iceberg catalog:
" + e.getMessage(), e);
} catch (ClassCastException e) {
throw new RuntimeException("Invalid properties for Iceberg
catalog: " + getProperties(), e);
} catch (Exception e) {
@@ -69,6 +65,12 @@ public abstract class IcebergExternalCatalog extends ExternalCatalog {
}
}
+ @Override
+ public void checkProperties() throws DdlException {
+ super.checkProperties();
+ catalogProperty.checkMetaStoreAndStorageProperties(AbstractIcebergProperties.class);
+ }
+
@Override
protected synchronized void initPreExecutionAuthenticator() {
if (executionAuthenticator == null) {
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java
index 7cd6fa0f2c8..76e093656bb 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/paimon/PaimonExternalCatalog.java
@@ -18,14 +18,12 @@
package org.apache.doris.datasource.paimon;
import org.apache.doris.common.DdlException;
-import org.apache.doris.common.UserException;
import org.apache.doris.datasource.CatalogProperty;
import org.apache.doris.datasource.ExternalCatalog;
import org.apache.doris.datasource.InitCatalogLog;
import org.apache.doris.datasource.NameMapping;
import org.apache.doris.datasource.SessionContext;
import org.apache.doris.datasource.property.metastore.AbstractPaimonProperties;
-import org.apache.doris.datasource.property.metastore.MetastoreProperties;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.logging.log4j.LogManager;
@@ -60,12 +58,7 @@ public class PaimonExternalCatalog extends ExternalCatalog {
@Override
protected void initLocalObjectsImpl() {
- try {
- paimonProperties = (AbstractPaimonProperties) MetastoreProperties.create(catalogProperty.getProperties());
- } catch (UserException e) {
- throw new IllegalArgumentException("Failed to create Paimon
properties from catalog properties,exception: "
- + ExceptionUtils.getRootCauseMessage(e), e);
- }
+ paimonProperties = (AbstractPaimonProperties) catalogProperty.getMetastoreProperties();
catalogType = paimonProperties.getPaimonCatalogType();
catalog = createCatalog();
initPreExecutionAuthenticator();
@@ -194,14 +187,7 @@ public class PaimonExternalCatalog extends ExternalCatalog {
@Override
public void checkProperties() throws DdlException {
- if (null != paimonProperties) {
- try {
- this.paimonProperties = (AbstractPaimonProperties) MetastoreProperties
- .create(catalogProperty.getProperties());
- } catch (UserException e) {
- throw new DdlException("Failed to create Paimon properties
from catalog properties, exception: "
- + ExceptionUtils.getRootCauseMessage(e), e);
- }
- }
+ super.checkProperties();
+ catalogProperty.checkMetaStoreAndStorageProperties(AbstractPaimonProperties.class);
}
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java
index c1b8a7b8724..99c170f8ce2 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsProperties.java
@@ -119,6 +119,7 @@ public class HdfsProperties extends HdfsCompatibleProperties {
initBackendConfigProperties();
this.hadoopStorageConfig = new Configuration();
this.backendConfigProperties.forEach(hadoopStorageConfig::set);
+ HdfsPropertiesUtils.checkHaConfig(backendConfigProperties);
hadoopAuthenticator = HadoopAuthenticator.getHadoopAuthenticator(hadoopStorageConfig);
}
diff --git a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java
index 11662a8e51b..7fc091daa64 100644
--- a/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java
+++ b/fe/fe-core/src/main/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtils.java
@@ -20,7 +20,9 @@ package org.apache.doris.datasource.property.storage;
import org.apache.doris.common.UserException;
import org.apache.doris.datasource.property.storage.exception.StoragePropertiesException;
+import com.google.common.base.Strings;
import org.apache.commons.lang3.StringUtils;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import java.io.UnsupportedEncodingException;
import java.net.URI;
@@ -28,8 +30,12 @@ import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.stream.Collectors;
public class HdfsPropertiesUtils {
private static final String URI_KEY = "uri";
@@ -127,7 +133,7 @@ public class HdfsPropertiesUtils {
}
public static String validateAndNormalizeUri(String location, String host, String defaultFs,
- Set<String> supportedSchemas) {
+ Set<String> supportedSchemas) {
if (StringUtils.isBlank(location)) {
throw new IllegalArgumentException("Property 'uri' is required.");
}
@@ -179,4 +185,76 @@ public class HdfsPropertiesUtils {
throw new StoragePropertiesException("Failed to parse URI: " + location, e);
}
}
+
+ /**
+ * Validate the required HDFS HA configuration properties.
+ *
+ * <p>This method checks the following:
+ * <ul>
+ * <li>{@code dfs.nameservices} must be defined if HA is enabled.</li>
+ * <li>{@code dfs.ha.namenodes.<nameservice>} must be defined and contain at least 2 namenodes.</li>
+ * <li>For each namenode, {@code dfs.namenode.rpc-address.<nameservice>.<namenode>} must be defined.</li>
+ * <li>{@code dfs.client.failover.proxy.provider.<nameservice>} must be defined.</li>
+ * </ul>
+ *
+ * @param hdfsProperties configuration map (similar to core-site.xml/hdfs-site.xml properties)
+ */
+ public static void checkHaConfig(Map<String, String> hdfsProperties) {
+ if (hdfsProperties == null) {
+ return;
+ }
+ // 1. Check dfs.nameservices
+ String dfsNameservices = hdfsProperties.getOrDefault(HdfsClientConfigKeys.DFS_NAMESERVICES, "");
+ if (Strings.isNullOrEmpty(dfsNameservices)) {
+ // No nameservice configured => HA is not enabled, nothing to validate
+ return;
+ }
+ for (String dfsservice : splitAndTrim(dfsNameservices)) {
+ if (dfsservice.isEmpty()) {
+ continue;
+ }
+ // 2. Check dfs.ha.namenodes.<nameservice>
+ String haNnKey = HdfsClientConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX + "." + dfsservice;
+ String namenodes = hdfsProperties.getOrDefault(haNnKey, "");
+ if (Strings.isNullOrEmpty(namenodes)) {
+ throw new IllegalArgumentException("Missing property: " +
haNnKey);
+ }
+ List<String> names = splitAndTrim(namenodes);
+ if (names.size() < 2) {
+ throw new IllegalArgumentException("HA requires at least 2
namenodes for service: " + dfsservice);
+ }
+ // 3. Check dfs.namenode.rpc-address.<nameservice>.<namenode>
+ for (String name : names) {
+ String rpcKey = HdfsClientConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY + "." + dfsservice + "." + name;
+ String address = hdfsProperties.getOrDefault(rpcKey, "");
+ if (Strings.isNullOrEmpty(address)) {
+ throw new IllegalArgumentException("Missing property: " +
rpcKey + " (expected format: host:port)");
+ }
+ }
+ // 4. Check dfs.client.failover.proxy.provider.<nameservice>
+ String failoverKey = HdfsClientConfigKeys.Failover.PROXY_PROVIDER_KEY_PREFIX + "." + dfsservice;
+ String failoverProvider = hdfsProperties.getOrDefault(failoverKey, "");
+ if (Strings.isNullOrEmpty(failoverProvider)) {
+ throw new IllegalArgumentException("Missing property: " +
failoverKey);
+ }
+ }
+ }
+
+ /**
+ * Utility method to split a comma-separated string, trim whitespace,
+ * and remove empty tokens.
+ *
+ * @param s the input string
+ * @return list of trimmed non-empty values
+ */
+ private static List<String> splitAndTrim(String s) {
+ if (Strings.isNullOrEmpty(s)) {
+ return Collections.emptyList();
+ }
+ return Arrays.stream(s.split(","))
+ .map(String::trim)
+ .filter(tok -> !tok.isEmpty())
+ .collect(Collectors.toList());
+ }
}
+
diff --git a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
index 9a5ada45bbb..a896931babe 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/common/util/LocationPathTest.java
@@ -38,7 +38,14 @@ public class LocationPathTest {
static {
Map<String, String> props = new HashMap<>();
- props.put("dfs.nameservices", "namenode:8020");
+ props.put("dfs.nameservices", "ns1");
+ // NameNodes for this nameservice
+ props.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ // RPC addresses for each NameNode
+ props.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
+ props.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020");
+ props.put("dfs.client.failover.proxy.provider.ns1",
+ "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
props.put("s3.endpoint", "s3.us-east-2.amazonaws.com");
props.put("s3.access_key", "access_key");
props.put("s3.secret_key", "secret_key");
@@ -74,7 +81,14 @@ public class LocationPathTest {
// HA props
Map<String, String> props = new HashMap<>();
- props.put("dfs.nameservices", "ns");
+ props.put("dfs.nameservices", "ns1");
+ // NameNodes for this nameservice
+ props.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ // RPC addresses for each NameNode
+ props.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
+ props.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020");
+ props.put("dfs.client.failover.proxy.provider.ns1",
+ "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
//HdfsProperties hdfsProperties = (HdfsProperties) StorageProperties.createPrimary( props);
Map<StorageProperties.Type, StorageProperties> storagePropertiesMap = StorageProperties.createAll(props).stream()
.collect(java.util.stream.Collectors.toMap(StorageProperties::getType, Function.identity()));
@@ -231,7 +245,7 @@ public class LocationPathTest {
assertNormalize("cosn://bucket/path/to/file",
"s3://bucket/path/to/file");
assertNormalize("viewfs://cluster/path/to/file",
"viewfs://cluster/path/to/file");
assertNormalize("/path/to/file", "hdfs://namenode:8020/path/to/file");
- assertNormalize("hdfs:///path/to/file",
"hdfs://namenode:8020/path/to/file");
+ assertNormalize("hdfs:///path/to/file", "hdfs://ns1/path/to/file");
}
private void assertNormalize(String input, String expected) {
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java
index c634ad64592..01e430fb841 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesTest.java
@@ -76,6 +76,10 @@ public class HdfsPropertiesTest {
// Test 3: Valid config resources (should succeed)
origProps.put("hadoop.config.resources",
"hadoop1/core-site.xml,hadoop1/hdfs-site.xml");
+ origProps.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ origProps.put("dfs.namenode.rpc-address.ns1.nn1", "localhost:9000");
+ origProps.put("dfs.namenode.rpc-address.ns1.nn2", "localhost:9001");
+ origProps.put("dfs.client.failover.proxy.provider.ns1",
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
List<StorageProperties> storageProperties =
StorageProperties.createAll(origProps);
HdfsProperties hdfsProperties = (HdfsProperties)
storageProperties.get(0);
Configuration conf = hdfsProperties.getHadoopStorageConfig();
@@ -129,6 +133,8 @@ public class HdfsPropertiesTest {
origProps.put("dfs.nameservices", "ns1");
origProps.put("dfs.ha.namenodes.ns1", "nn1,nn2");
origProps.put("dfs.namenode.rpc-address.ns1.nn1", "localhost:9000");
+ origProps.put("dfs.namenode.rpc-address.ns1.nn2", "localhost:9001");
+ origProps.put("dfs.client.failover.proxy.provider.ns1",
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
origProps.put("hadoop.async.threads.max", "10");
properties = StorageProperties.createAll(origProps).get(0);
Assertions.assertEquals(properties.getClass(), HdfsProperties.class);
diff --git a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java
index 36378f4023a..eed7360206b 100644
--- a/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java
+++ b/fe/fe-core/src/test/java/org/apache/doris/datasource/property/storage/HdfsPropertiesUtilsTest.java
@@ -145,4 +145,74 @@ public class HdfsPropertiesUtilsTest {
String result = HdfsPropertiesUtils.convertUrlToFilePath(uri, "", supportSchema);
Assertions.assertEquals("HDFS://localhost:9000/test", result);
}
+
+ @Test
+ public void testValidHaConfig() {
+ Map<String, String> config = new HashMap<>();
+ config.put("dfs.nameservices", "ns1");
+ config.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
+ config.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020");
+ config.put("dfs.client.failover.proxy.provider.ns1",
+
"org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+
+ // Valid HA configuration should pass without exception
+ Assertions.assertDoesNotThrow(() -> HdfsPropertiesUtils.checkHaConfig(config));
+ }
+
+ @Test
+ public void testNoNameservices() {
+ Map<String, String> config = new HashMap<>();
+ // No dfs.nameservices configured → not HA mode, should not throw
+ Assertions.assertDoesNotThrow(() -> HdfsPropertiesUtils.checkHaConfig(config));
+ }
+
+ @Test
+ public void testMissingHaNamenodes() {
+ Map<String, String> config = new HashMap<>();
+ config.put("dfs.nameservices", "ns1");
+ // dfs.ha.namenodes.ns1 missing
+ IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class,
+ () -> HdfsPropertiesUtils.checkHaConfig(config));
+ Assertions.assertTrue(ex.getMessage().contains("dfs.ha.namenodes.ns1"));
+ }
+
+ @Test
+ public void testNotEnoughNamenodes() {
+ Map<String, String> config = new HashMap<>();
+ config.put("dfs.nameservices", "ns1");
+ config.put("dfs.ha.namenodes.ns1", "nn1"); // Only 1 namenode
+ IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class,
+ () -> HdfsPropertiesUtils.checkHaConfig(config));
+ Assertions.assertTrue(ex.getMessage().contains("HA requires at least 2 namenodes"));
+ }
+
+ @Test
+ public void testMissingRpcAddress() {
+ Map<String, String> config = new HashMap<>();
+ config.put("dfs.nameservices", "ns1");
+ config.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
+ // nn2 rpc-address missing
+ config.put("dfs.client.failover.proxy.provider.ns1",
+ "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
+
+ IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class,
+ () -> HdfsPropertiesUtils.checkHaConfig(config));
+ Assertions.assertTrue(ex.getMessage().contains("dfs.namenode.rpc-address.ns1.nn2"));
+ }
+
+ @Test
+ public void testMissingFailoverProvider() {
+ Map<String, String> config = new HashMap<>();
+ config.put("dfs.nameservices", "ns1");
+ config.put("dfs.ha.namenodes.ns1", "nn1,nn2");
+ config.put("dfs.namenode.rpc-address.ns1.nn1", "127.0.0.1:8020");
+ config.put("dfs.namenode.rpc-address.ns1.nn2", "127.0.0.2:8020");
+ // failover provider missing
+
+ IllegalArgumentException ex = Assertions.assertThrows(IllegalArgumentException.class,
+ () -> HdfsPropertiesUtils.checkHaConfig(config));
+ Assertions.assertTrue(ex.getMessage().contains("dfs.client.failover.proxy.provider.ns1"));
+ }
}
diff --git a/regression-test/suites/auth_call/test_assistant_command_auth.groovy b/regression-test/suites/auth_call/test_assistant_command_auth.groovy
index 1f150d196e1..c6b8b2d68fa 100644
--- a/regression-test/suites/auth_call/test_assistant_command_auth.groovy
+++ b/regression-test/suites/auth_call/test_assistant_command_auth.groovy
@@ -56,7 +56,9 @@ suite("test_assistant_command_auth","p0,auth_call") {
logger.info("insert_res: " + insert_res)
sql """create catalog if not exists ${catalogName} properties (
- 'type'='hms'
+ 'type'='hms',
+ 'hive.metastore.uris' = 'thrift://127.0.0.1:9083'
+
);"""
diff --git a/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy
index 89ad3b4c104..f2f6a97cb48 100644
--- a/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy
+++ b/regression-test/suites/auth_call/test_ddl_catalog_auth.groovy
@@ -25,7 +25,8 @@ suite("test_ddl_catalog_auth","p0,auth_call") {
String catalogNameOther = 'test_ddl_catalog_auth_catalog_other'
sql """create catalog if not exists ${catalogNameOther} properties (
- 'type'='hms'
+ 'type'='hms',
+ 'hive.metastore.uris'='thrift://127.0.0.1:9083'
);"""
try_sql("DROP USER ${user}")
@@ -46,7 +47,8 @@ suite("test_ddl_catalog_auth","p0,auth_call") {
connect(user, "${pwd}", context.config.jdbcUrl) {
test {
sql """create catalog if not exists ${catalogName} properties (
- 'type'='hms'
+ 'type'='hms',
+ 'hive.metastore.uris'='thrift://127.0.0.1:9083'
);"""
exception "denied"
}
@@ -54,13 +56,15 @@ suite("test_ddl_catalog_auth","p0,auth_call") {
assertTrue(ctl_res.size() == 1)
}
sql """create catalog if not exists ${catalogName} properties (
- 'type'='hms'
+ 'type'='hms',
+ 'hive.metastore.uris'='thrift://127.0.0.1:9083'
);"""
sql """grant Create_priv on ${catalogName}.*.* to ${user}"""
sql """drop catalog ${catalogName}"""
connect(user, "${pwd}", context.config.jdbcUrl) {
sql """create catalog if not exists ${catalogName} properties (
- 'type'='hms'
+ 'type'='hms',
+ 'hive.metastore.uris'='thrift://127.0.0.1:9083'
);"""
sql """show create catalog ${catalogName}"""
def ctl_res = sql """show catalogs;"""
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]