This is an automated email from the ASF dual-hosted git repository.

zhangliang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/shardingsphere.git


The following commit(s) were added to refs/heads/master by this push:
     new 70da7661c72 Add StorageUnitManagerTest and ResourceSwitchManagerTest (#37257)
70da7661c72 is described below

commit 70da7661c72be9cf5a51282c12fe56a8a83afe2f
Author: Liang Zhang <[email protected]>
AuthorDate: Tue Dec 2 18:00:47 2025 +0800

    Add StorageUnitManagerTest and ResourceSwitchManagerTest (#37257)
    
    * Add StorageUnitManagerTest and ResourceSwitchManagerTest
    
    * Add StorageUnitManagerTest and ResourceSwitchManagerTest
    
    * Add StorageUnitManagerTest and ResourceSwitchManagerTest
---
 .../resource/ResourceSwitchManagerTest.java        | 150 ++++++++++++++++++
 .../manager/resource/StorageUnitManagerTest.java   | 172 +++++++++++++++++++++
 2 files changed, 322 insertions(+)

diff --git a/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/ResourceSwitchManagerTest.java b/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/ResourceSwitchManagerTest.java
new file mode 100644
index 00000000000..17989e96f39
--- /dev/null
+++ b/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/ResourceSwitchManagerTest.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.shardingsphere.mode.metadata.manager.resource;
+
+import org.apache.shardingsphere.infra.datasource.pool.creator.DataSourcePoolCreator;
+import org.apache.shardingsphere.infra.datasource.pool.props.domain.DataSourcePoolProperties;
+import org.apache.shardingsphere.infra.metadata.database.resource.ResourceMetaData;
+import org.apache.shardingsphere.infra.metadata.database.resource.node.StorageNode;
+import org.apache.shardingsphere.infra.metadata.database.resource.unit.StorageUnit;
+import org.apache.shardingsphere.test.infra.framework.extension.mock.AutoMockExtension;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.MockedStatic;
+
+import javax.sql.DataSource;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.LinkedHashMap;
+import java.util.LinkedHashSet;
+import java.util.Map;
+
+import static org.hamcrest.MatcherAssert.assertThat;
+import static org.hamcrest.Matchers.hasEntry;
+import static org.hamcrest.Matchers.hasKey;
+import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.not;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.verifyNoInteractions;
+
+@ExtendWith(AutoMockExtension.class)
+class ResourceSwitchManagerTest {
+    
+    private final ResourceSwitchManager resourceSwitchManager = new ResourceSwitchManager();
+    
+    @Test
+    void assertSwitchByRegisterStorageUnit() {
+        ResourceMetaData resourceMetaData = createResourceMetaDataWithSingleUnit("ds_existing");
+        Map<String, DataSourcePoolProperties> toBeRegistered = new LinkedHashMap<>(2, 1F);
+        toBeRegistered.put("ds_existing", createDataSourcePoolProperties());
+        toBeRegistered.put("ds_new", createDataSourcePoolProperties());
+        DataSource newDataSource = mock(DataSource.class);
+        try (MockedStatic<DataSourcePoolCreator> mocked = mockStatic(DataSourcePoolCreator.class)) {
+            mocked.when(() -> DataSourcePoolCreator.create(any(DataSourcePoolProperties.class))).thenReturn(newDataSource);
+            SwitchingResource actual = resourceSwitchManager.switchByRegisterStorageUnit(resourceMetaData, toBeRegistered);
+            assertThat(actual.getNewDataSources().size(), is(1));
+            assertThat(actual.getNewDataSources(), hasKey(new StorageNode("ds_new")));
+            verifyNoInteractions(resourceMetaData.getDataSources().get(new StorageNode("ds_existing")));
+        }
+    }
+    
+    @Test
+    void assertSwitchByAlterStorageUnit() {
+        StorageNode existingNode = new StorageNode("ds_altered");
+        ResourceMetaData resourceMetaData = createResourceMetaData(existingNode, "unused_ds");
+        Map<String, DataSourcePoolProperties> toBeAltered = Collections.singletonMap("ds_altered", createDataSourcePoolProperties());
+        DataSource newDataSource = mock(DataSource.class);
+        try (MockedStatic<DataSourcePoolCreator> mocked = mockStatic(DataSourcePoolCreator.class)) {
+            mocked.when(() -> DataSourcePoolCreator.create(any(DataSourcePoolProperties.class))).thenReturn(newDataSource);
+            SwitchingResource actual = resourceSwitchManager.switchByAlterStorageUnit(resourceMetaData, toBeAltered);
+            assertThat(actual.getNewDataSources().get(existingNode), is(newDataSource));
+            assertThat(actual.getStaleDataSources().get(existingNode), is(resourceMetaData.getDataSources().get(existingNode)));
+            assertThat(actual.getStaleDataSources(), not(hasKey(new StorageNode("extra_only"))));
+            assertTrue(actual.getStaleStorageUnitNames().contains("ds_altered"));
+            assertThat(actual.getMergedDataSourcePoolPropertiesMap(), hasEntry("ds_altered", createDataSourcePoolProperties()));
+        }
+    }
+    
+    @Test
+    void assertSwitchByUnregisterStorageUnit() {
+        StorageNode sharedNode = new StorageNode("shared");
+        StorageNode orphanNode = new StorageNode("orphan");
+        Map<StorageNode, DataSource> dataSources = new LinkedHashMap<>(2, 1F);
+        dataSources.put(sharedNode, mock(DataSource.class));
+        dataSources.put(orphanNode, mock(DataSource.class));
+        Map<String, StorageUnit> storageUnits = new LinkedHashMap<>(3, 1F);
+        storageUnits.put("in_use", new StorageUnit(sharedNode, 
createDataSourcePoolProperties(), dataSources.get(sharedNode)));
+        storageUnits.put("shared_copy", new StorageUnit(sharedNode, 
createDataSourcePoolProperties(), dataSources.get(sharedNode)));
+        storageUnits.put("to_remove", new StorageUnit(orphanNode, 
createDataSourcePoolProperties(), dataSources.get(orphanNode)));
+        ResourceMetaData resourceMetaData = new ResourceMetaData(dataSources, 
storageUnits);
+        SwitchingResource actual = 
resourceSwitchManager.switchByUnregisterStorageUnit(resourceMetaData, new 
LinkedHashSet<>(Arrays.asList("to_remove", "shared_copy", "missing")));
+        assertThat(actual.getStaleDataSources().get(orphanNode), 
is(dataSources.get(orphanNode)));
+        assertThat(actual.getStaleDataSources(), not(hasKey(sharedNode)));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
not(hasKey("to_remove")));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
not(hasKey("shared_copy")));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
hasKey("in_use"));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
not(hasKey("missing")));
+    }
+    
+    @Test
+    void assertCreateByUnregisterStorageUnit() {
+        StorageNode sharedNode = new StorageNode("shared");
+        StorageNode orphanNode = new StorageNode("orphan");
+        Map<StorageNode, DataSource> dataSources = new LinkedHashMap<>(2, 1F);
+        dataSources.put(sharedNode, mock(DataSource.class));
+        dataSources.put(orphanNode, mock(DataSource.class));
+        Map<String, StorageUnit> storageUnits = new LinkedHashMap<>(2, 1F);
+        storageUnits.put("keep", new StorageUnit(sharedNode, 
createDataSourcePoolProperties(), dataSources.get(sharedNode)));
+        storageUnits.put("drop", new StorageUnit(orphanNode, 
createDataSourcePoolProperties(), dataSources.get(orphanNode)));
+        ResourceMetaData resourceMetaData = new ResourceMetaData(dataSources, 
storageUnits);
+        SwitchingResource actual = 
resourceSwitchManager.createByUnregisterStorageUnit(resourceMetaData, 
Collections.singleton("drop"));
+        assertThat(actual.getStaleDataSources().get(orphanNode), 
is(dataSources.get(orphanNode)));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
hasKey("keep"));
+        assertThat(actual.getMergedDataSourcePoolPropertiesMap(), 
not(hasKey("drop")));
+        assertThat(resourceMetaData.getStorageUnits(), hasKey("drop"));
+    }
+    
+    private ResourceMetaData createResourceMetaDataWithSingleUnit(final String name) {
+        StorageNode storageNode = new StorageNode(name);
+        DataSource dataSource = mock(DataSource.class);
+        return new ResourceMetaData(Collections.singletonMap(storageNode, dataSource), Collections.singletonMap(name, new StorageUnit(storageNode, createDataSourcePoolProperties(), dataSource)));
+    }
+    
+    private ResourceMetaData createResourceMetaData(final StorageNode existingNode, final String anotherName) {
+        DataSource existingDataSource = mock(DataSource.class);
+        DataSource anotherDataSource = mock(DataSource.class);
+        Map<StorageNode, DataSource> dataSources = new LinkedHashMap<>(3, 1F);
+        dataSources.put(existingNode, existingDataSource);
+        dataSources.put(new StorageNode(anotherName), anotherDataSource);
+        dataSources.put(new StorageNode("extra_only"), mock(DataSource.class));
+        Map<String, StorageUnit> storageUnits = new LinkedHashMap<>(2, 1F);
+        storageUnits.put(existingNode.getName(), new StorageUnit(existingNode, createDataSourcePoolProperties(), existingDataSource));
+        storageUnits.put(anotherName, new StorageUnit(new StorageNode(anotherName), createDataSourcePoolProperties(), anotherDataSource));
+        return new ResourceMetaData(dataSources, storageUnits);
+    }
+    
+    private DataSourcePoolProperties createDataSourcePoolProperties() {
+        Map<String, Object> props = new LinkedHashMap<>(2, 1F);
+        props.put("url", "jdbc:mock://localhost:3306/foo_db");
+        props.put("username", "root");
+        return new DataSourcePoolProperties("Foo", props);
+    }
+}
diff --git a/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/StorageUnitManagerTest.java b/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/StorageUnitManagerTest.java
new file mode 100644
index 00000000000..9481c0d047c
--- /dev/null
+++ b/mode/core/src/test/java/org/apache/shardingsphere/mode/metadata/manager/resource/StorageUnitManagerTest.java
@@ -0,0 +1,172 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.shardingsphere.mode.metadata.manager.resource;
+
+import lombok.SneakyThrows;
+import org.apache.shardingsphere.infra.instance.ComputeNodeInstanceContext;
+import org.apache.shardingsphere.infra.metadata.database.ShardingSphereDatabase;
+import org.apache.shardingsphere.infra.metadata.database.resource.ResourceMetaData;
+import org.apache.shardingsphere.infra.metadata.database.schema.model.ShardingSphereSchema;
+import org.apache.shardingsphere.infra.rule.ShardingSphereRule;
+import org.apache.shardingsphere.mode.metadata.MetaDataContexts;
+import org.apache.shardingsphere.mode.metadata.factory.MetaDataContextsFactory;
+import org.apache.shardingsphere.mode.metadata.persist.MetaDataPersistFacade;
+import org.apache.shardingsphere.test.infra.framework.extension.mock.AutoMockExtension;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.extension.ExtendWith;
+import org.mockito.MockedConstruction;
+
+import java.sql.SQLException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.Map;
+
+import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.RETURNS_DEEP_STUBS;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.lenient;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockConstruction;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+import static org.mockito.Mockito.withSettings;
+
+@ExtendWith(AutoMockExtension.class)
+class StorageUnitManagerTest {
+    
+    private static final String DATABASE_NAME = "foo_db";
+    
+    @Test
+    void assertRegisterSuccess() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        SwitchingResource switchingResource = new SwitchingResource(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList(), Collections.emptyMap());
+        when(resourceSwitchManager.switchByRegisterStorageUnit(any(ResourceMetaData.class), any(Map.class))).thenReturn(switchingResource);
+        ShardingSphereDatabase reloadDatabase = mock(ShardingSphereDatabase.class);
+        when(reloadDatabase.getAllSchemas()).thenReturn(Collections.singleton(new ShardingSphereSchema("foo_schema")));
+        MetaDataContexts reloadMetaDataContexts = mock(MetaDataContexts.class, RETURNS_DEEP_STUBS);
+        when(reloadMetaDataContexts.getMetaData().getDatabase(DATABASE_NAME)).thenReturn(reloadDatabase);
+        try (
+                MockedConstruction<MetaDataContextsFactory> ignored = mockConstruction(MetaDataContextsFactory.class,
+                        (mock, context) -> when(mock.createBySwitchResource(eq(DATABASE_NAME), eq(true), eq(switchingResource), eq(metaDataContexts))).thenReturn(reloadMetaDataContexts))) {
+            createManager(metaDataContexts, resourceSwitchManager).register(DATABASE_NAME, Collections.emptyMap());
+        }
+        verify(metaDataContexts).update(any(MetaDataContexts.class));
+        verify(metaDataContexts.getMetaData()).putDatabase(any(ShardingSphereDatabase.class));
+        verifyClosableRuleInvoked(metaDataContexts);
+    }
+    
+    @Test
+    void assertRegisterLogsErrorWhenSQLException() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        doAnswer(invocation -> {
+            throw new SQLException("register error");
+        }).when(resourceSwitchManager).switchByRegisterStorageUnit(any(ResourceMetaData.class), any(Map.class));
+        assertDoesNotThrow(() -> createManager(metaDataContexts, resourceSwitchManager).register(DATABASE_NAME, Collections.emptyMap()));
+        verify(metaDataContexts, never()).update(any(MetaDataContexts.class));
+        verify(metaDataContexts.getMetaData(), never()).putDatabase(any(ShardingSphereDatabase.class));
+    }
+    
+    @Test
+    void assertAlterSuccess() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        SwitchingResource switchingResource = new SwitchingResource(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList(), Collections.emptyMap());
+        when(resourceSwitchManager.switchByAlterStorageUnit(any(ResourceMetaData.class), any(Map.class))).thenReturn(switchingResource);
+        ShardingSphereDatabase reloadDatabase = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
+        MetaDataContexts reloadMetaDataContexts = mock(MetaDataContexts.class, RETURNS_DEEP_STUBS);
+        when(reloadMetaDataContexts.getMetaData().getDatabase(DATABASE_NAME)).thenReturn(reloadDatabase);
+        when(reloadDatabase.getAllSchemas()).thenReturn(Collections.singleton(new ShardingSphereSchema("foo_schema")));
+        try (
+                MockedConstruction<MetaDataContextsFactory> ignored = mockConstruction(MetaDataContextsFactory.class,
+                        (mock, context) -> when(mock.createBySwitchResource(eq(DATABASE_NAME), eq(true), eq(switchingResource), eq(metaDataContexts))).thenReturn(reloadMetaDataContexts))) {
+            createManager(metaDataContexts, resourceSwitchManager).alter(DATABASE_NAME, Collections.emptyMap());
+        }
+        verify(metaDataContexts).update(any(MetaDataContexts.class));
+        verifyClosableRuleInvoked(metaDataContexts);
+    }
+    
+    @Test
+    void assertAlterLogsErrorWhenSQLException() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        doAnswer(invocation -> {
+            throw new SQLException("alter error");
+        }).when(resourceSwitchManager).switchByAlterStorageUnit(any(ResourceMetaData.class), any(Map.class));
+        assertDoesNotThrow(() -> createManager(metaDataContexts, resourceSwitchManager).alter(DATABASE_NAME, Collections.emptyMap()));
+        verify(metaDataContexts, never()).update(any(MetaDataContexts.class));
+        verify(metaDataContexts.getMetaData(), never()).putDatabase(any(ShardingSphereDatabase.class));
+    }
+    
+    @Test
+    void assertUnregisterSuccess() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        SwitchingResource switchingResource = new SwitchingResource(Collections.emptyMap(), Collections.emptyMap(), Collections.emptyList(), Collections.emptyMap());
+        when(resourceSwitchManager.switchByUnregisterStorageUnit(any(ResourceMetaData.class), any(Collection.class))).thenReturn(switchingResource);
+        ShardingSphereDatabase reloadDatabase = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
+        MetaDataContexts reloadMetaDataContexts = mock(MetaDataContexts.class, RETURNS_DEEP_STUBS);
+        when(reloadMetaDataContexts.getMetaData().getDatabase(DATABASE_NAME)).thenReturn(reloadDatabase);
+        when(reloadDatabase.getAllSchemas()).thenReturn(Collections.singleton(new ShardingSphereSchema("foo_schema")));
+        try (
+                MockedConstruction<MetaDataContextsFactory> ignored = mockConstruction(MetaDataContextsFactory.class,
+                        (mock, context) -> when(mock.createBySwitchResource(eq(DATABASE_NAME), eq(false), eq(switchingResource), eq(metaDataContexts))).thenReturn(reloadMetaDataContexts))) {
+            createManager(metaDataContexts, resourceSwitchManager).unregister(DATABASE_NAME, "ds_0");
+        }
+        verify(metaDataContexts).update(any(MetaDataContexts.class));
+        verifyClosableRuleInvoked(metaDataContexts);
+    }
+    
+    @Test
+    void assertUnregisterLogsErrorWhenSQLException() {
+        MetaDataContexts metaDataContexts = mockMetaDataContexts();
+        ResourceSwitchManager resourceSwitchManager = mock(ResourceSwitchManager.class);
+        doAnswer(invocation -> {
+            throw new SQLException("unregister error");
+        }).when(resourceSwitchManager).switchByUnregisterStorageUnit(any(ResourceMetaData.class), any(Collection.class));
+        assertDoesNotThrow(() -> createManager(metaDataContexts, resourceSwitchManager).unregister(DATABASE_NAME, "ds_0"));
+        verify(metaDataContexts, never()).update(any(MetaDataContexts.class));
+        verify(metaDataContexts.getMetaData(), never()).putDatabase(any(ShardingSphereDatabase.class));
+    }
+    
+    private MetaDataContexts mockMetaDataContexts() {
+        MetaDataContexts result = mock(MetaDataContexts.class, RETURNS_DEEP_STUBS);
+        ShardingSphereDatabase database = mock(ShardingSphereDatabase.class, RETURNS_DEEP_STUBS);
+        when(database.getResourceMetaData()).thenReturn(new ResourceMetaData(Collections.emptyMap(), Collections.emptyMap()));
+        when(database.getRuleMetaData().getRules()).thenReturn(Arrays.asList(mock(ShardingSphereRule.class, withSettings().extraInterfaces(AutoCloseable.class)), mock(ShardingSphereRule.class)));
+        when(result.getMetaData().getDatabase(DATABASE_NAME)).thenReturn(database);
+        return result;
+    }
+    
+    private StorageUnitManager createManager(final MetaDataContexts metaDataContexts, final ResourceSwitchManager resourceSwitchManager) {
+        MetaDataPersistFacade metaDataPersistFacade = mock(MetaDataPersistFacade.class, RETURNS_DEEP_STUBS);
+        lenient().when(metaDataPersistFacade.getDatabaseMetaDataFacade().getView().load(anyString(), anyString())).thenReturn(Collections.emptyList());
+        return new StorageUnitManager(metaDataContexts, mock(ComputeNodeInstanceContext.class), resourceSwitchManager, metaDataPersistFacade);
+    }
+    
+    @SneakyThrows
+    private void verifyClosableRuleInvoked(final MetaDataContexts metaDataContexts) {
+        verify((AutoCloseable) metaDataContexts.getMetaData().getDatabase(DATABASE_NAME).getRuleMetaData().getRules().iterator().next()).close();
+    }
+}
