This is an automated email from the ASF dual-hosted git repository.
yuqi4733 pushed a commit to branch branch-lance-namepspace-dev
in repository https://gitbox.apache.org/repos/asf/gravitino.git
The following commit(s) were added to refs/heads/branch-lance-namepspace-dev by
this push:
new ea1576a86b [#8835][#8836] feat(lakehouseCatalog): supports catalog
and schema operations for lakehouse catalog (#8851)
ea1576a86b is described below
commit ea1576a86becaadc77c3d5bad35c061b58d5e636
Author: mchades <[email protected]>
AuthorDate: Wed Oct 22 14:09:52 2025 +0800
[#8835][#8836] feat(lakehouseCatalog): supports catalog and schema
operations for lakehouse catalog (#8851)
### What changes were proposed in this pull request?
supports catalog and schema operations for lakehouse catalog
### Why are the changes needed?
Fix: #8835
Fix: #8836
### Does this PR introduce _any_ user-facing change?
no
### How was this patch tested?
tests added
---
.../catalog-generic-lakehouse/build.gradle.kts | 1 +
.../GenericLakehouseCatalogCapability.java | 24 +--
.../GenericLakehouseCatalogOperations.java | 78 +++++--
.../GenericLakehouseCatalogPropertiesMetadata.java | 26 ++-
.../GenericLakehouseSchemaPropertiesMetadata.java | 24 ++-
.../TestGenericLakehouseCatalogOperations.java | 232 +++++++++++++++++++++
6 files changed, 335 insertions(+), 50 deletions(-)
diff --git a/catalogs/catalog-generic-lakehouse/build.gradle.kts
b/catalogs/catalog-generic-lakehouse/build.gradle.kts
index c3ad842ac3..fceac14304 100644
--- a/catalogs/catalog-generic-lakehouse/build.gradle.kts
+++ b/catalogs/catalog-generic-lakehouse/build.gradle.kts
@@ -42,6 +42,7 @@ dependencies {
implementation(libs.commons.io)
implementation(libs.commons.lang3)
implementation(libs.guava)
+ implementation(libs.hadoop3.client.api)
annotationProcessor(libs.lombok)
diff --git
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
index 08015f7fce..412b82d6a7 100644
---
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
+++
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogCapability.java
@@ -24,27 +24,7 @@ import
org.apache.gravitino.connector.capability.CapabilityResult;
public class GenericLakehouseCatalogCapability implements Capability {
@Override
- public CapabilityResult columnNotNull() {
- throw new UnsupportedOperationException(
- "Not implemented yet:
GenericLakehouseCatalogCapability.columnNotNull");
- }
-
- @Override
- public CapabilityResult columnDefaultValue() {
- throw new UnsupportedOperationException(
- "Not implemented yet:
GenericLakehouseCatalogCapability.columnDefaultValue");
- }
-
- @Override
- public CapabilityResult caseSensitiveOnName(Scope scope) {
- switch (scope) {
- case SCHEMA:
- case TABLE:
- case COLUMN:
- throw new UnsupportedOperationException(
- "Not implemented yet:
GenericLakehouseCatalogCapability.caseSensitiveOnName");
- default:
- return CapabilityResult.SUPPORTED;
- }
+ public CapabilityResult managedStorage(Scope scope) {
+ return CapabilityResult.SUPPORTED;
}
}
diff --git
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
index 64743488a0..b626aabc16 100644
---
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
+++
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogOperations.java
@@ -18,9 +18,18 @@
*/
package org.apache.gravitino.catalog.lakehouse;
+import com.google.common.annotations.VisibleForTesting;
import java.util.Map;
+import java.util.Optional;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.gravitino.Catalog;
+import org.apache.gravitino.EntityStore;
+import org.apache.gravitino.GravitinoEnv;
import org.apache.gravitino.NameIdentifier;
import org.apache.gravitino.Namespace;
+import org.apache.gravitino.Schema;
+import org.apache.gravitino.SchemaChange;
+import org.apache.gravitino.catalog.ManagedSchemaOperations;
import org.apache.gravitino.connector.CatalogInfo;
import org.apache.gravitino.connector.CatalogOperations;
import org.apache.gravitino.connector.HasPropertyMetadata;
@@ -39,11 +48,19 @@ import
org.apache.gravitino.rel.expressions.distributions.Distribution;
import org.apache.gravitino.rel.expressions.sorts.SortOrder;
import org.apache.gravitino.rel.expressions.transforms.Transform;
import org.apache.gravitino.rel.indexes.Index;
+import org.apache.hadoop.fs.Path;
/** Operations for interacting with a generic lakehouse catalog in Apache
Gravitino. */
public class GenericLakehouseCatalogOperations
implements CatalogOperations, SupportsSchemas, TableCatalog {
+ private static final String SLASH = "/";
+
+ private final ManagedSchemaOperations managedSchemaOps;
+
+ @SuppressWarnings("unused") // todo: remove this after implementing table
operations
+ private Optional<Path> catalogLakehouseDir;
+
/**
* Initializes the generic lakehouse catalog operations with the provided
configuration.
*
@@ -56,7 +73,30 @@ public class GenericLakehouseCatalogOperations
public void initialize(
Map<String, String> conf, CatalogInfo info, HasPropertyMetadata
propertiesMetadata)
throws RuntimeException {
- // TODO: Implement initialization logic
+ String catalogDir =
+ (String)
+ propertiesMetadata
+ .catalogPropertiesMetadata()
+ .getOrDefault(conf,
GenericLakehouseCatalogPropertiesMetadata.LAKEHOUSE_DIR);
+ this.catalogLakehouseDir =
+ StringUtils.isNotBlank(catalogDir)
+ ?
Optional.of(catalogDir).map(this::ensureTrailingSlash).map(Path::new)
+ : Optional.empty();
+ }
+
+ public GenericLakehouseCatalogOperations() {
+ this(GravitinoEnv.getInstance().entityStore());
+ }
+
+ @VisibleForTesting
+ GenericLakehouseCatalogOperations(EntityStore store) {
+ this.managedSchemaOps =
+ new ManagedSchemaOperations() {
+ @Override
+ protected EntityStore store() {
+ return store;
+ }
+ };
}
@Override
@@ -65,44 +105,38 @@ public class GenericLakehouseCatalogOperations
@Override
public void testConnection(
NameIdentifier catalogIdent,
- org.apache.gravitino.Catalog.Type type,
+ Catalog.Type type,
String provider,
String comment,
- Map<String, String> properties)
- throws Exception {
- throw new UnsupportedOperationException("Not implemented yet.");
+ Map<String, String> properties) {
+ // No-op for generic lakehouse catalog.
}
@Override
- public org.apache.gravitino.NameIdentifier[]
listSchemas(org.apache.gravitino.Namespace namespace)
- throws NoSuchCatalogException {
- throw new UnsupportedOperationException("Not implemented yet.");
+ public NameIdentifier[] listSchemas(Namespace namespace) throws
NoSuchCatalogException {
+ return managedSchemaOps.listSchemas(namespace);
}
@Override
- public org.apache.gravitino.Schema createSchema(
- org.apache.gravitino.NameIdentifier ident, String comment, Map<String,
String> properties)
+ public Schema createSchema(NameIdentifier ident, String comment, Map<String,
String> properties)
throws NoSuchCatalogException, SchemaAlreadyExistsException {
- throw new UnsupportedOperationException("Not implemented yet.");
+ return managedSchemaOps.createSchema(ident, comment, properties);
}
@Override
- public org.apache.gravitino.Schema
loadSchema(org.apache.gravitino.NameIdentifier ident)
- throws NoSuchSchemaException {
- throw new UnsupportedOperationException("Not implemented yet.");
+ public Schema loadSchema(NameIdentifier ident) throws NoSuchSchemaException {
+ return managedSchemaOps.loadSchema(ident);
}
@Override
- public org.apache.gravitino.Schema alterSchema(
- org.apache.gravitino.NameIdentifier ident,
org.apache.gravitino.SchemaChange... changes)
+ public Schema alterSchema(NameIdentifier ident, SchemaChange... changes)
throws NoSuchSchemaException {
- throw new UnsupportedOperationException("Not implemented yet.");
+ return managedSchemaOps.alterSchema(ident, changes);
}
@Override
- public boolean dropSchema(org.apache.gravitino.NameIdentifier ident, boolean
cascade)
- throws NonEmptySchemaException {
- throw new UnsupportedOperationException("Not implemented yet.");
+ public boolean dropSchema(NameIdentifier ident, boolean cascade) throws
NonEmptySchemaException {
+ return managedSchemaOps.dropSchema(ident, cascade);
}
@Override
@@ -139,4 +173,8 @@ public class GenericLakehouseCatalogOperations
public boolean dropTable(NameIdentifier ident) {
throw new UnsupportedOperationException("Not implemented yet.");
}
+
+ private String ensureTrailingSlash(String path) {
+ return path.endsWith(SLASH) ? path : path + SLASH;
+ }
}
diff --git
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
index 18543bd0a3..01dfc1da17 100644
---
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
+++
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseCatalogPropertiesMetadata.java
@@ -19,18 +19,36 @@
package org.apache.gravitino.catalog.lakehouse;
-import com.google.common.collect.ImmutableMap;
+import static
org.apache.gravitino.connector.PropertyEntry.stringOptionalPropertyEntry;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import java.util.List;
import java.util.Map;
import org.apache.gravitino.connector.BaseCatalogPropertiesMetadata;
import org.apache.gravitino.connector.PropertyEntry;
public class GenericLakehouseCatalogPropertiesMetadata extends
BaseCatalogPropertiesMetadata {
- private static final Map<String, PropertyEntry<?>>
GENERIC_LAKEHOUSE_CATALOG_PROPERTY_ENTRIES =
- ImmutableMap.<String, PropertyEntry<?>>builder().build();
+ public static final String LAKEHOUSE_DIR = "lakehouse-dir";
+
+ private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA;
+
+ static {
+ List<PropertyEntry<?>> propertyEntries =
+ ImmutableList.of(
+ stringOptionalPropertyEntry(
+ LAKEHOUSE_DIR,
+ "The root directory of the lakehouse catalog.",
+ false /* immutable */,
+ null, /* defaultValue */
+ false /* hidden */));
+
+ PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries,
PropertyEntry::getName);
+ }
@Override
protected Map<String, PropertyEntry<?>> specificPropertyEntries() {
- return GENERIC_LAKEHOUSE_CATALOG_PROPERTY_ENTRIES;
+ return PROPERTIES_METADATA;
}
}
diff --git
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
index 05da8443cd..52a65e7698 100644
---
a/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
+++
b/catalogs/catalog-generic-lakehouse/src/main/java/org/apache/gravitino/catalog/lakehouse/GenericLakehouseSchemaPropertiesMetadata.java
@@ -18,20 +18,36 @@
*/
package org.apache.gravitino.catalog.lakehouse;
-import com.google.common.collect.ImmutableMap;
+import static
org.apache.gravitino.connector.PropertyEntry.stringOptionalPropertyEntry;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Maps;
+import java.util.List;
import java.util.Map;
import org.apache.gravitino.connector.BasePropertiesMetadata;
import org.apache.gravitino.connector.PropertyEntry;
public class GenericLakehouseSchemaPropertiesMetadata extends
BasePropertiesMetadata {
- private static final Map<String, PropertyEntry<?>> propertiesMetadata;
+ public static final String LAKEHOUSE_DIR =
+ GenericLakehouseCatalogPropertiesMetadata.LAKEHOUSE_DIR;
+
+ private static final Map<String, PropertyEntry<?>> PROPERTIES_METADATA;
static {
- propertiesMetadata = ImmutableMap.of();
+ List<PropertyEntry<?>> propertyEntries =
+ ImmutableList.of(
+ stringOptionalPropertyEntry(
+ LAKEHOUSE_DIR,
+ "The root directory of the lakehouse schema.",
+ false /* immutable */,
+ null, /* defaultValue */
+ false /* hidden */));
+
+ PROPERTIES_METADATA = Maps.uniqueIndex(propertyEntries,
PropertyEntry::getName);
}
@Override
protected Map<String, PropertyEntry<?>> specificPropertyEntries() {
- return propertiesMetadata;
+ return PROPERTIES_METADATA;
}
}
diff --git
a/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
b/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
new file mode 100644
index 0000000000..67887c2f7a
--- /dev/null
+++
b/catalogs/catalog-generic-lakehouse/src/test/java/org/apache/gravitino/catalog/lakehouse/TestGenericLakehouseCatalogOperations.java
@@ -0,0 +1,232 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.gravitino.catalog.lakehouse;
+
+import static org.apache.gravitino.Configs.DEFAULT_ENTITY_RELATIONAL_STORE;
+import static
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER;
+import static
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_MAX_CONNECTIONS;
+import static
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_PATH;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_URL;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_USER;
+import static
org.apache.gravitino.Configs.ENTITY_RELATIONAL_JDBC_BACKEND_WAIT_MILLISECONDS;
+import static org.apache.gravitino.Configs.ENTITY_RELATIONAL_STORE;
+import static org.apache.gravitino.Configs.ENTITY_STORE;
+import static org.apache.gravitino.Configs.RELATIONAL_ENTITY_STORE;
+import static org.apache.gravitino.Configs.STORE_DELETE_AFTER_TIME;
+import static org.apache.gravitino.Configs.STORE_TRANSACTION_MAX_SKEW_TIME;
+import static org.apache.gravitino.Configs.VERSION_RETENTION_COUNT;
+import static org.mockito.Mockito.when;
+
+import java.io.File;
+import java.io.IOException;
+import java.time.Instant;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+import org.apache.commons.io.FileUtils;
+import org.apache.gravitino.Catalog;
+import org.apache.gravitino.Config;
+import org.apache.gravitino.Configs;
+import org.apache.gravitino.EntityStore;
+import org.apache.gravitino.EntityStoreFactory;
+import org.apache.gravitino.NameIdentifier;
+import org.apache.gravitino.Namespace;
+import org.apache.gravitino.Schema;
+import org.apache.gravitino.StringIdentifier;
+import org.apache.gravitino.exceptions.NoSuchCatalogException;
+import org.apache.gravitino.exceptions.NoSuchSchemaException;
+import org.apache.gravitino.exceptions.SchemaAlreadyExistsException;
+import org.apache.gravitino.meta.AuditInfo;
+import org.apache.gravitino.meta.BaseMetalake;
+import org.apache.gravitino.meta.CatalogEntity;
+import org.apache.gravitino.meta.SchemaVersion;
+import org.apache.gravitino.storage.IdGenerator;
+import org.apache.gravitino.storage.RandomIdGenerator;
+import org.apache.gravitino.utils.NameIdentifierUtil;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+public class TestGenericLakehouseCatalogOperations {
+ private static final String STORE_PATH =
+ "/tmp/gravitino_test_entityStore_" +
UUID.randomUUID().toString().replace("-", "");
+ private static final String METALAKE_NAME = "metalake_for_lakehouse_test";
+ private static final String CATALOG_NAME = "lakehouse_catalog_test";
+
+ private static EntityStore store;
+ private static IdGenerator idGenerator;
+ private static GenericLakehouseCatalogOperations ops;
+
+ @BeforeAll
+ public static void setUp() throws IOException {
+ Config config = Mockito.mock(Config.class);
+ when(config.get(ENTITY_STORE)).thenReturn(RELATIONAL_ENTITY_STORE);
+
when(config.get(ENTITY_RELATIONAL_STORE)).thenReturn(DEFAULT_ENTITY_RELATIONAL_STORE);
+
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PATH)).thenReturn(STORE_PATH);
+
+ // The following properties are used to create the JDBC connection; they
are only for testing. In
+ // the real world, they will be set automatically from the configuration
file if you set
+ // ENTITY_RELATIONAL_STORE to EMBEDDED_ENTITY_RELATIONAL_STORE.
+ when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_URL))
+ .thenReturn(String.format("jdbc:h2:%s;DB_CLOSE_DELAY=-1;MODE=MYSQL",
STORE_PATH));
+
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_USER)).thenReturn("gravitino");
+
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_PASSWORD)).thenReturn("gravitino");
+
when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_DRIVER)).thenReturn("org.h2.Driver");
+
Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_MAX_CONNECTIONS)).thenReturn(100);
+
Mockito.when(config.get(ENTITY_RELATIONAL_JDBC_BACKEND_WAIT_MILLISECONDS)).thenReturn(1000L);
+
+ File f = FileUtils.getFile(STORE_PATH);
+ f.deleteOnExit();
+
+ when(config.get(VERSION_RETENTION_COUNT)).thenReturn(1L);
+ when(config.get(STORE_TRANSACTION_MAX_SKEW_TIME)).thenReturn(1000L);
+ when(config.get(STORE_DELETE_AFTER_TIME)).thenReturn(20 * 60 * 1000L);
+ Mockito.when(config.get(Configs.CACHE_ENABLED)).thenReturn(false);
+
+ store = EntityStoreFactory.createEntityStore(config);
+ store.initialize(config);
+ idGenerator = RandomIdGenerator.INSTANCE;
+
+ // Create the metalake and catalog
+ AuditInfo auditInfo =
+
AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build();
+ BaseMetalake metalake =
+ BaseMetalake.builder()
+ .withId(idGenerator.nextId())
+ .withName(METALAKE_NAME)
+ .withVersion(SchemaVersion.V_0_1)
+ .withAuditInfo(auditInfo)
+ .withName(METALAKE_NAME)
+ .build();
+ store.put(metalake, false);
+
+ CatalogEntity catalog =
+ CatalogEntity.builder()
+ .withId(idGenerator.nextId())
+ .withName(CATALOG_NAME)
+ .withNamespace(Namespace.of(METALAKE_NAME))
+ .withProvider("generic-lakehouse")
+ .withType(Catalog.Type.RELATIONAL)
+ .withAuditInfo(auditInfo)
+ .build();
+ store.put(catalog, false);
+
+ ops = new GenericLakehouseCatalogOperations(store);
+ }
+
+ @AfterAll
+ public static void tearDown() throws IOException {
+ ops.close();
+ store.close();
+ FileUtils.deleteDirectory(new File(STORE_PATH));
+ }
+
+ @Test
+ public void testSchemaOperations() {
+ String schemaName = randomSchemaName();
+ NameIdentifier schemaIdent =
+ NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, schemaName);
+ StringIdentifier stringId = StringIdentifier.fromId(idGenerator.nextId());
+ Map<String, String> properties =
StringIdentifier.newPropertiesWithId(stringId, null);
+
+ ops.createSchema(schemaIdent, "schema comment", properties);
+ Schema loadedSchema = ops.loadSchema(schemaIdent);
+
+ Assertions.assertEquals(schemaName, loadedSchema.name());
+ Assertions.assertEquals("schema comment", loadedSchema.comment());
+ Assertions.assertEquals(properties, loadedSchema.properties());
+
+ // Test create schema with the same name
+ Assertions.assertThrows(
+ SchemaAlreadyExistsException.class,
+ () -> ops.createSchema(schemaIdent, "schema comment", properties));
+
+ // Test create schema in a non-existent catalog
+ Assertions.assertThrows(
+ NoSuchCatalogException.class,
+ () ->
+ ops.createSchema(
+ NameIdentifierUtil.ofSchema(METALAKE_NAME,
"non-existent-catalog", schemaName),
+ "schema comment",
+ properties));
+
+ // Test load a non-existent schema
+ Assertions.assertThrows(
+ NoSuchSchemaException.class,
+ () ->
+ ops.loadSchema(
+ NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME,
"non-existent-schema")));
+
+ // Test load a non-existent schema in a non-existent catalog
+ Assertions.assertThrows(
+ NoSuchSchemaException.class,
+ () ->
+ ops.loadSchema(
+ NameIdentifierUtil.ofSchema(
+ METALAKE_NAME, "non-existent-catalog",
"non-existent-schema")));
+
+ // Create another schema
+ String schemaName2 = randomSchemaName();
+ NameIdentifier schemaIdent2 =
+ NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME, schemaName2);
+ StringIdentifier stringId2 = StringIdentifier.fromId(idGenerator.nextId());
+ Map<String, String> properties2 =
StringIdentifier.newPropertiesWithId(stringId2, null);
+
+ ops.createSchema(schemaIdent2, "schema comment 2", properties2);
+
+ // Test list schemas
+ NameIdentifier[] idents = ops.listSchemas(Namespace.of(METALAKE_NAME,
CATALOG_NAME));
+
+ Set<NameIdentifier> resultSet =
Arrays.stream(idents).collect(Collectors.toSet());
+ Assertions.assertTrue(resultSet.contains(schemaIdent));
+ Assertions.assertTrue(resultSet.contains(schemaIdent2));
+
+ // Test list schemas in a non-existent catalog
+ Assertions.assertThrows(
+ NoSuchCatalogException.class,
+ () -> ops.listSchemas(Namespace.of(METALAKE_NAME,
"non-existent-catalog")));
+
+ // Test drop schema
+ Assertions.assertTrue(ops.dropSchema(schemaIdent, false));
+ Assertions.assertFalse(ops.dropSchema(schemaIdent, false));
+ Assertions.assertTrue(ops.dropSchema(schemaIdent2, false));
+ Assertions.assertFalse(ops.dropSchema(schemaIdent2, false));
+
+ // Test drop non-existent schema
+ Assertions.assertFalse(
+ ops.dropSchema(
+ NameIdentifierUtil.ofSchema(METALAKE_NAME, CATALOG_NAME,
"non-existent-schema"),
+ false));
+
+ // Test drop schema in a non-existent catalog
+ Assertions.assertFalse(
+ ops.dropSchema(
+ NameIdentifierUtil.ofSchema(METALAKE_NAME, "non-existent-catalog",
schemaName2),
+ false));
+ }
+
+ private String randomSchemaName() {
+ return "schema_" + UUID.randomUUID().toString().replace("-", "");
+ }
+}