This is an automated email from the ASF dual-hosted git repository.

zhaojinchao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/shardingsphere.git


The following commit(s) were added to refs/heads/master by this push:
     new 0beb0dc7698 Close temporary data source when register and alter storage unit (#33496)
0beb0dc7698 is described below

commit 0beb0dc7698ee2a06a2f8238c9decbd23a808b37
Author: Haoran Meng <[email protected]>
AuthorDate: Fri Nov 1 19:12:42 2024 +0800

    Close temporary data source when register and alter storage unit (#33496)
---
 .../ClusterMetaDataManagerPersistService.java      | 56 +++++++++++++++-------
 1 file changed, 40 insertions(+), 16 deletions(-)

diff --git a/mode/type/cluster/core/src/main/java/org/apache/shardingsphere/mode/manager/cluster/persist/ClusterMetaDataManagerPersistService.java b/mode/type/cluster/core/src/main/java/org/apache/shardingsphere/mode/manager/cluster/persist/ClusterMetaDataManagerPersistService.java
index 65ca998c91e..56dc33523de 100644
--- a/mode/type/cluster/core/src/main/java/org/apache/shardingsphere/mode/manager/cluster/persist/ClusterMetaDataManagerPersistService.java
+++ b/mode/type/cluster/core/src/main/java/org/apache/shardingsphere/mode/manager/cluster/persist/ClusterMetaDataManagerPersistService.java
@@ -19,7 +19,9 @@ package org.apache.shardingsphere.mode.manager.cluster.persist;
 
 import org.apache.groovy.util.Maps;
 import org.apache.shardingsphere.infra.config.rule.RuleConfiguration;
+import org.apache.shardingsphere.infra.datasource.pool.destroyer.DataSourcePoolDestroyer;
 import org.apache.shardingsphere.infra.datasource.pool.props.domain.DataSourcePoolProperties;
+import org.apache.shardingsphere.infra.metadata.database.resource.node.StorageNode;
 import org.apache.shardingsphere.infra.metadata.database.schema.model.ShardingSphereSchema;
 import org.apache.shardingsphere.infra.metadata.database.schema.model.ShardingSphereTable;
 import org.apache.shardingsphere.infra.metadata.database.schema.model.ShardingSphereView;
@@ -40,9 +42,11 @@ import org.apache.shardingsphere.mode.persist.service.MetaDataManagerPersistServ
 import org.apache.shardingsphere.mode.spi.PersistRepository;
 import org.apache.shardingsphere.single.config.SingleRuleConfiguration;
 
+import javax.sql.DataSource;
 import java.sql.SQLException;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 import java.util.Optional;
 import java.util.Properties;
@@ -117,27 +121,39 @@ public final class ClusterMetaDataManagerPersistService implements MetaDataManag
     @Override
     public void registerStorageUnits(final String databaseName, final Map<String, DataSourcePoolProperties> toBeRegisteredProps) throws SQLException {
         MetaDataContexts originalMetaDataContexts = metaDataContextManager.getMetaDataContexts().get();
-        SwitchingResource switchingResource = metaDataContextManager.getResourceSwitchManager()
-                .switchByRegisterStorageUnit(originalMetaDataContexts.getMetaData().getDatabase(databaseName).getResourceMetaData(), toBeRegisteredProps);
-        MetaDataContexts reloadMetaDataContexts = MetaDataContextsFactory.createBySwitchResource(databaseName, false,
-                switchingResource, originalMetaDataContexts, metaDataPersistService, metaDataContextManager.getComputeNodeInstanceContext());
-        metaDataPersistService.getDataSourceUnitService().persist(databaseName, toBeRegisteredProps);
-        afterStorageUnitsAltered(databaseName, originalMetaDataContexts, reloadMetaDataContexts);
-        reloadMetaDataContexts.close();
+        Map<StorageNode, DataSource> newDataSources = new HashMap<>(toBeRegisteredProps.size());
+        try {
+            SwitchingResource switchingResource = metaDataContextManager.getResourceSwitchManager()
+                    .switchByRegisterStorageUnit(originalMetaDataContexts.getMetaData().getDatabase(databaseName).getResourceMetaData(), toBeRegisteredProps);
+            newDataSources.putAll(switchingResource.getNewDataSources());
+            MetaDataContexts reloadMetaDataContexts = MetaDataContextsFactory.createBySwitchResource(databaseName, false,
+                    switchingResource, originalMetaDataContexts, metaDataPersistService, metaDataContextManager.getComputeNodeInstanceContext());
+            metaDataPersistService.getDataSourceUnitService().persist(databaseName, toBeRegisteredProps);
+            afterStorageUnitsAltered(databaseName, originalMetaDataContexts, reloadMetaDataContexts);
+            reloadMetaDataContexts.close();
+        } finally {
+            closeNewDataSources(newDataSources);
+        }
     }
     
     @Override
     public void alterStorageUnits(final String databaseName, final Map<String, DataSourcePoolProperties> toBeUpdatedProps) throws SQLException {
         MetaDataContexts originalMetaDataContexts = metaDataContextManager.getMetaDataContexts().get();
-        SwitchingResource switchingResource = metaDataContextManager.getResourceSwitchManager()
-                .switchByAlterStorageUnit(originalMetaDataContexts.getMetaData().getDatabase(databaseName).getResourceMetaData(), toBeUpdatedProps);
-        MetaDataContexts reloadMetaDataContexts = MetaDataContextsFactory.createBySwitchResource(databaseName, false,
-                switchingResource, originalMetaDataContexts, metaDataPersistService, metaDataContextManager.getComputeNodeInstanceContext());
-        DataSourceUnitPersistService dataSourceService = metaDataPersistService.getDataSourceUnitService();
-        metaDataPersistService.getMetaDataVersionPersistService()
-                .switchActiveVersion(dataSourceService.persist(databaseName, toBeUpdatedProps));
-        afterStorageUnitsAltered(databaseName, originalMetaDataContexts, reloadMetaDataContexts);
-        reloadMetaDataContexts.close();
+        Map<StorageNode, DataSource> newDataSources = new HashMap<>(toBeUpdatedProps.size());
+        try {
+            SwitchingResource switchingResource = metaDataContextManager.getResourceSwitchManager()
+                    .switchByAlterStorageUnit(originalMetaDataContexts.getMetaData().getDatabase(databaseName).getResourceMetaData(), toBeUpdatedProps);
+            newDataSources.putAll(switchingResource.getNewDataSources());
+            MetaDataContexts reloadMetaDataContexts = MetaDataContextsFactory.createBySwitchResource(databaseName, false,
+                    switchingResource, originalMetaDataContexts, metaDataPersistService, metaDataContextManager.getComputeNodeInstanceContext());
+            DataSourceUnitPersistService dataSourceService = metaDataPersistService.getDataSourceUnitService();
+            metaDataPersistService.getMetaDataVersionPersistService()
+                    .switchActiveVersion(dataSourceService.persist(databaseName, toBeUpdatedProps));
+            afterStorageUnitsAltered(databaseName, originalMetaDataContexts, reloadMetaDataContexts);
+            reloadMetaDataContexts.close();
+        } finally {
+            closeNewDataSources(newDataSources);
+        }
     }
     
     @Override
@@ -154,6 +170,14 @@ public final class ClusterMetaDataManagerPersistService implements MetaDataManag
         }
     }
     
+    private void closeNewDataSources(final Map<StorageNode, DataSource> newDataSources) {
+        for (Map.Entry<StorageNode, DataSource> entry : newDataSources.entrySet()) {
+            if (null != entry.getValue()) {
+                new DataSourcePoolDestroyer(entry.getValue()).asyncDestroy();
+            }
+        }
+    }
+    
     private Collection<String> getToBeDroppedResourceNames(final String databaseName, final Collection<String> toBeDroppedResourceNames) {
         Map<String, DataSourcePoolProperties> propsMap = metaDataPersistService.getDataSourceUnitService().load(databaseName);
         return toBeDroppedResourceNames.stream().filter(propsMap::containsKey).collect(Collectors.toList());
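
The change above is the classic try/finally resource-cleanup idiom: record every temporary data source pool created while switching resources, then destroy the pools on the way out so an exception in the persist/reload steps can no longer leak connections. Below is a minimal, self-contained Java sketch of that idiom; the Pool interface, register method, and CompletableFuture-based close are illustrative assumptions, not ShardingSphere API (the real code collects switchingResource.getNewDataSources() and hands each pool to DataSourcePoolDestroyer.asyncDestroy()).

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.CompletableFuture;

public final class TemporaryPoolCleanupSketch {
    
    /** Hypothetical stand-in for a pooled javax.sql.DataSource. */
    public interface Pool extends AutoCloseable {
    }
    
    /**
     * Mirrors the shape of the fixed registerStorageUnits: collect the pools
     * created during the resource switch first, then destroy them in finally
     * so a failure in the persist/reload steps no longer leaks them.
     */
    public static void register(final Map<String, Pool> toBeRegistered) {
        Map<String, Pool> newPools = new HashMap<>(toBeRegistered.size());
        try {
            newPools.putAll(toBeRegistered);
            // ... switch resources, persist metadata, reload contexts (any step may throw) ...
        } finally {
            closeNewPools(newPools);
        }
    }
    
    /** Same null-guarded loop as the new closeNewDataSources helper. */
    private static void closeNewPools(final Map<String, Pool> newPools) {
        for (Map.Entry<String, Pool> entry : newPools.entrySet()) {
            Pool pool = entry.getValue();
            if (null != pool) {
                // Approximates DataSourcePoolDestroyer.asyncDestroy(): close on a
                // background thread so the caller never blocks on pool shutdown.
                CompletableFuture.runAsync(() -> {
                    try {
                        pool.close();
                    } catch (final Exception ignored) {
                        // best-effort cleanup; nothing useful to do on failure here
                    }
                });
            }
        }
    }
}

Destroying asynchronously keeps registration from stalling on pool shutdown, which may wait for in-flight connections to finish.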
