This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git

commit 287a47266b05098221d82565593e759a6d441425
Merge: cf1428ddcca 6e6a2766637
Author: Daan Hoogland <d...@onecht.net>
AuthorDate: Wed Aug 14 12:33:47 2024 +0200

    Merge branch '4.19'

 .../com/cloud/upgrade/DatabaseUpgradeChecker.java  | 160 +++++++++++++--------
 .../cloudstack/affinity/dao/AffinityGroupDao.java  |   3 +
 .../affinity/dao/AffinityGroupDaoImpl.java         |  12 ++
 .../cloud.add_guest_os_and_hypervisor_mapping.sql  |  49 +++++++
 .../db/procedures/cloud.idempotent_add_column.sql  |  27 ++++
 .../db/procedures/cloud.idempotent_add_key.sql     |  27 ++++
 .../cloud.idempotent_add_unique_index.sql          |  26 ++++
 .../procedures/cloud.idempotent_add_unique_key.sql |  26 ++++
 .../procedures/cloud.idempotent_change_column.sql  |  27 ++++
 .../cloud.idempotent_create_unique_index.sql       |  27 ++++
 .../cloud.idempotent_drop_foreign_key.sql          |  25 ++++
 .../db/procedures/cloud.idempotent_drop_index.sql  |  27 ++++
 ...dempotent_insert_guestos_hypervisor_mapping.sql |  48 +++++++
 .../db/procedures/usage.idempotent_add_column.sql  |  26 ++++
 .../procedures/usage.idempotent_change_column.sql  |  27 ++++
 .../db/procedures/usage.idempotent_drop_index.sql  |  25 ++++
 .../resources/META-INF/db/schema-41000to41100.sql  |  45 ------
 .../resources/META-INF/db/schema-41520to41600.sql  |  47 ------
 .../resources/META-INF/db/schema-41600to41610.sql  |  54 -------
 .../resources/META-INF/db/schema-41610to41700.sql  |  45 ------
 .../resources/META-INF/db/schema-41720to41800.sql  |  59 --------
 .../resources/META-INF/db/schema-4910to4920.sql    |  38 -----
 .../cloudstack/affinity/HostAffinityProcessor.java |  39 +++--
 .../affinity/HostAntiAffinityProcessor.java        |  99 +++++++++----
 24 files changed, 602 insertions(+), 386 deletions(-)
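
In brief: this merge forward-ports the 4.19 work that evidently replaces DDL guards duplicated across the six historical schema files above (schema-41000to41100.sql through schema-4910to4920.sql) with shared idempotent stored procedures under META-INF/db/procedures, has DatabaseUpgradeChecker install those procedures before running any upgrade, and moves the host (anti-)affinity processors into locking transactions. A per-version schema script can then add a column without failing on re-run. As a rough illustration only (the procedures' real parameter lists are not visible in this diffstat), a caller might look like:

    // Hypothetical JDBC caller; the connection details, table/column names,
    // and the procedure's parameter list are placeholders, not from this commit.
    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.Statement;

    public class IdempotentCallSketch {
        public static void main(String[] args) throws Exception {
            try (Connection conn = DriverManager.getConnection(
                    "jdbc:mysql://localhost:3306/cloud", "cloud", "password");
                 Statement stmt = conn.createStatement()) {
                // Safe to execute repeatedly: the procedure is expected to
                // consult information_schema before issuing ALTER TABLE.
                stmt.execute("CALL cloud.idempotent_add_column("
                        + "'some_table', 'some_column INT DEFAULT 0')");
            }
        }
    }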

diff --cc engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
index d390a480e41,b908455c1fe..cb219007325
--- a/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
+++ b/engine/schema/src/main/java/com/cloud/upgrade/DatabaseUpgradeChecker.java
@@@ -127,9 -125,10 +127,10 @@@ import com.cloud.utils.exception.CloudR
  import com.google.common.annotations.VisibleForTesting;
  
  public class DatabaseUpgradeChecker implements SystemIntegrityChecker {
 -    private static final Logger s_logger = Logger.getLogger(DatabaseUpgradeChecker.class);
 +    protected static Logger LOGGER = LogManager.getLogger(DatabaseUpgradeChecker.class);
      private final DatabaseVersionHierarchy hierarchy;
      private static final String VIEWS_DIRECTORY = Paths.get("META-INF", "db", "views").toString();
+     private static final String PROCEDURES_DIRECTORY = Paths.get("META-INF", "db", "procedures").toString();
  
      @Inject
      VersionDao _dao;
@@@ -298,83 -296,120 +299,120 @@@
      }
  
      protected void upgrade(CloudStackVersion dbVersion, CloudStackVersion currentVersion) {
+         executeProcedureScripts();
+         final DbUpgrade[] upgrades = executeUpgrades(dbVersion, currentVersion);
+ 
+         executeViewScripts();
+         updateSystemVmTemplates(upgrades);
+     }
+ 
+     protected void executeProcedureScripts() {
 -        s_logger.info(String.format("Executing Stored Procedure scripts that 
are under resource directory [%s].", PROCEDURES_DIRECTORY));
++        LOGGER.info(String.format("Executing Stored Procedure scripts that 
are under resource directory [%s].", PROCEDURES_DIRECTORY));
+         List<String> filesPathUnderViewsDirectory = 
FileUtil.getFilesPathsUnderResourceDirectory(PROCEDURES_DIRECTORY);
+ 
+         try (TransactionLegacy txn = 
TransactionLegacy.open("execute-procedure-scripts")) {
+             Connection conn = txn.getConnection();
+ 
+             for (String filePath : filesPathUnderViewsDirectory) {
 -                s_logger.debug(String.format("Executing PROCEDURE script 
[%s].", filePath));
++                LOGGER.debug(String.format("Executing PROCEDURE script 
[%s].", filePath));
+ 
+                 InputStream viewScript = 
Thread.currentThread().getContextClassLoader().getResourceAsStream(filePath);
+                 runScript(conn, viewScript);
+             }
+ 
 -            s_logger.info(String.format("Finished execution of PROCEDURE 
scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY));
++            LOGGER.info(String.format("Finished execution of PROCEDURE 
scripts that are under resource directory [%s].", PROCEDURES_DIRECTORY));
+         } catch (SQLException e) {
+             String message = String.format("Unable to execute PROCEDURE 
scripts due to [%s].", e.getMessage());
 -            s_logger.error(message, e);
++            LOGGER.error(message, e);
+             throw new CloudRuntimeException(message, e);
+         }
+     }
+ 
+     private DbUpgrade[] executeUpgrades(CloudStackVersion dbVersion, CloudStackVersion currentVersion) {
 -        s_logger.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion);
 +        LOGGER.info("Database upgrade must be performed from " + dbVersion + " to " + currentVersion);
  
          final DbUpgrade[] upgrades = calculateUpgradePath(dbVersion, currentVersion);
  
          for (DbUpgrade upgrade : upgrades) {
-             VersionVO version;
-             LOGGER.debug("Running upgrade " + 
upgrade.getClass().getSimpleName() + " to upgrade from " + 
upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
-                 .getUpgradableVersionRange()[1] + " to " + 
upgrade.getUpgradedVersion());
-             TransactionLegacy txn = TransactionLegacy.open("Upgrade");
-             txn.start();
-             try {
-                 Connection conn;
-                 try {
-                     conn = txn.getConnection();
-                 } catch (SQLException e) {
-                     String errorMessage = "Unable to upgrade the database";
-                     LOGGER.error(errorMessage, e);
-                     throw new CloudRuntimeException(errorMessage, e);
-                 }
-                 InputStream[] scripts = upgrade.getPrepareScripts();
-                 if (scripts != null) {
-                     for (InputStream script : scripts) {
-                         runScript(conn, script);
-                     }
-                 }
- 
-                 upgrade.performDataMigration(conn);
- 
-                 version = new VersionVO(upgrade.getUpgradedVersion());
-                 version = _dao.persist(version);
+             VersionVO version = executeUpgrade(upgrade);
+             executeUpgradeCleanup(upgrade, version);
+         }
+         return upgrades;
+     }
  
-                 txn.commit();
-             } catch (CloudRuntimeException e) {
+     private VersionVO executeUpgrade(DbUpgrade upgrade) {
+         VersionVO version;
 -        s_logger.debug("Running upgrade " + 
upgrade.getClass().getSimpleName() + " to upgrade from " + 
upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
++        LOGGER.debug("Running upgrade " + upgrade.getClass().getSimpleName() 
+ " to upgrade from " + upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
+             .getUpgradableVersionRange()[1] + " to " + 
upgrade.getUpgradedVersion());
+         TransactionLegacy txn = TransactionLegacy.open("Upgrade");
+         txn.start();
+         try {
+             Connection conn;
+             try {
+                 conn = txn.getConnection();
+             } catch (SQLException e) {
                  String errorMessage = "Unable to upgrade the database";
 -                s_logger.error(errorMessage, e);
 +                LOGGER.error(errorMessage, e);
                  throw new CloudRuntimeException(errorMessage, e);
-             } finally {
-                 txn.close();
+             }
+             InputStream[] scripts = upgrade.getPrepareScripts();
+             if (scripts != null) {
+                 for (InputStream script : scripts) {
+                     runScript(conn, script);
+                 }
              }
  
-             // Run the corresponding '-cleanup.sql' script
-             txn = TransactionLegacy.open("Cleanup");
-             try {
-                 LOGGER.info("Cleanup upgrade " + 
upgrade.getClass().getSimpleName() + " to upgrade from " + 
upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
-                     .getUpgradableVersionRange()[1] + " to " + 
upgrade.getUpgradedVersion());
+             upgrade.performDataMigration(conn);
  
-                 txn.start();
-                 Connection conn;
-                 try {
-                     conn = txn.getConnection();
-                 } catch (SQLException e) {
-                     LOGGER.error("Unable to cleanup the database", e);
-                     throw new CloudRuntimeException("Unable to cleanup the database", e);
-                 }
+             version = new VersionVO(upgrade.getUpgradedVersion());
+             version = _dao.persist(version);
  
-                 InputStream[] scripts = upgrade.getCleanupScripts();
-                 if (scripts != null) {
-                     for (InputStream script : scripts) {
-                         runScript(conn, script);
-                         LOGGER.debug("Cleanup script " + 
upgrade.getClass().getSimpleName() + " is executed successfully");
-                     }
-                 }
-                 txn.commit();
+             txn.commit();
+         } catch (CloudRuntimeException e) {
+             String errorMessage = "Unable to upgrade the database";
 -            s_logger.error(errorMessage, e);
++            LOGGER.error(errorMessage, e);
+             throw new CloudRuntimeException(errorMessage, e);
+         } finally {
+             txn.close();
+         }
+         return version;
+     }
  
-                 txn.start();
-                 version.setStep(Step.Complete);
-                 version.setUpdated(new Date());
-                 _dao.update(version.getId(), version);
-                 txn.commit();
-                 LOGGER.debug("Upgrade completed for version " + 
version.getVersion());
-             } finally {
-                 txn.close();
+     private void executeUpgradeCleanup(DbUpgrade upgrade, VersionVO version) {
+         TransactionLegacy txn;
+         // Run the corresponding '-cleanup.sql' script
+         txn = TransactionLegacy.open("Cleanup");
+         try {
 -            s_logger.info("Cleanup upgrade " + 
upgrade.getClass().getSimpleName() + " to upgrade from " + 
upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
++            LOGGER.info("Cleanup upgrade " + 
upgrade.getClass().getSimpleName() + " to upgrade from " + 
upgrade.getUpgradableVersionRange()[0] + "-" + upgrade
+                 .getUpgradableVersionRange()[1] + " to " + 
upgrade.getUpgradedVersion());
+ 
+             txn.start();
+             Connection conn;
+             try {
+                 conn = txn.getConnection();
+             } catch (SQLException e) {
 -                s_logger.error("Unable to cleanup the database", e);
++                LOGGER.error("Unable to cleanup the database", e);
+                 throw new CloudRuntimeException("Unable to cleanup the database", e);
              }
-         }
  
-         executeViewScripts();
-         updateSystemVmTemplates(upgrades);
+             InputStream[] scripts = upgrade.getCleanupScripts();
+             if (scripts != null) {
+                 for (InputStream script : scripts) {
+                     runScript(conn, script);
 -                    s_logger.debug("Cleanup script " + 
upgrade.getClass().getSimpleName() + " is executed successfully");
++                    LOGGER.debug("Cleanup script " + 
upgrade.getClass().getSimpleName() + " is executed successfully");
+                 }
+             }
+             txn.commit();
+ 
+             txn.start();
+             version.setStep(Step.Complete);
+             version.setUpdated(new Date());
+             _dao.update(version.getId(), version);
+             txn.commit();
 -            s_logger.debug("Upgrade completed for version " + 
version.getVersion());
++            LOGGER.debug("Upgrade completed for version " + 
version.getVersion());
+         } finally {
+             txn.close();
+         }
      }
  
      protected void executeViewScripts() {
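
The new executeProcedureScripts() above mirrors the existing view-script handling: enumerate classpath resources under a directory and feed each one to runScript() inside a single TransactionLegacy. Stripped of CloudStack's helpers, the pattern is roughly the following (a minimal plain-JDBC sketch, assuming each resource holds one executable statement, which the diff does not show):

    // Plain-JDBC sketch of the classpath-script pattern; not CloudStack code.
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.sql.Connection;
    import java.sql.Statement;
    import java.util.List;

    class ScriptRunnerSketch {
        void runAll(Connection conn, List<String> resourcePaths) throws Exception {
            for (String path : resourcePaths) {
                // Each resource path is assumed to exist on the classpath.
                try (InputStream in = Thread.currentThread()
                        .getContextClassLoader().getResourceAsStream(path);
                     Statement stmt = conn.createStatement()) {
                    // Assumes one statement per file (e.g. a CREATE PROCEDURE body).
                    stmt.execute(new String(in.readAllBytes(), StandardCharsets.UTF_8));
                }
            }
        }
    }

Note that upgrade() runs the procedure scripts before calculateUpgradePath(), presumably so every subsequent per-version script can call them.
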
diff --cc plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java
index 7f316fe7a91,072eff09215..4a5bbe8e787
--- a/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java
+++ b/plugins/affinity-group-processors/host-affinity/src/main/java/org/apache/cloudstack/affinity/HostAffinityProcessor.java
@@@ -26,7 -27,12 +27,11 @@@ import java.util.stream.Collectors
  
  import javax.inject.Inject;
  
+ import com.cloud.utils.db.Transaction;
+ import com.cloud.utils.db.TransactionCallback;
+ import com.cloud.utils.db.TransactionCallbackNoReturn;
+ import com.cloud.utils.db.TransactionStatus;
  import org.apache.commons.collections.CollectionUtils;
 -import org.apache.log4j.Logger;
  
  import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
  import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
diff --cc plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java
index 9feeeed2b6d,970632906c6..cd7708b8648
--- a/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java
+++ b/plugins/affinity-group-processors/host-anti-affinity/src/main/java/org/apache/cloudstack/affinity/HostAntiAffinityProcessor.java
@@@ -23,6 -24,13 +24,7 @@@ import java.util.stream.Collectors
  import javax.inject.Inject;
  import javax.naming.ConfigurationException;
  
 -import com.cloud.utils.DateUtil;
 -import com.cloud.utils.db.Transaction;
 -import com.cloud.utils.db.TransactionCallback;
 -import com.cloud.utils.db.TransactionCallbackNoReturn;
 -import com.cloud.utils.db.TransactionStatus;
+ import org.apache.commons.collections.CollectionUtils;
 -import org.apache.log4j.Logger;
  
  import org.apache.cloudstack.affinity.dao.AffinityGroupDao;
  import org.apache.cloudstack.affinity.dao.AffinityGroupVMMapDao;
@@@ -35,8 -43,7 +37,12 @@@ import com.cloud.deploy.DeployDestinati
  import com.cloud.deploy.DeploymentPlan;
  import com.cloud.deploy.DeploymentPlanner.ExcludeList;
  import com.cloud.exception.AffinityConflictException;
 +import com.cloud.utils.DateUtil;
  import com.cloud.utils.NumbersUtil;
++import com.cloud.utils.db.Transaction;
++import com.cloud.utils.db.TransactionCallback;
++import com.cloud.utils.db.TransactionCallbackNoReturn;
++import com.cloud.utils.db.TransactionStatus;
  import com.cloud.vm.VMInstanceVO;
  import com.cloud.vm.VirtualMachine;
  import com.cloud.vm.VirtualMachineProfile;
@@@ -65,34 -73,49 +71,49 @@@ public class HostAntiAffinityProcessor 
          VirtualMachine vm = vmProfile.getVirtualMachine();
          List<AffinityGroupVMMapVO> vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType());
  
-         for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) {
-             if (vmGroupMapping != null) {
-                 AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId());
- 
-                 if (logger.isDebugEnabled()) {
-                     logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId());
+         if (CollectionUtils.isEmpty(vmGroupMappings)) {
+             return;
+         }
+         List<Long> affinityGroupIds = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList());
+         Transaction.execute(new TransactionCallbackNoReturn() {
+             @Override
+             public void doInTransactionWithoutResult(TransactionStatus status) {
+                 _affinityGroupDao.listByIds(affinityGroupIds, true);
+                 for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) {
+                     processAffinityGroup(vmGroupMapping, avoid, vm);
                  }
+             }
+         });
  
-                 List<Long> groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId());
-                 groupVMIds.remove(vm.getId());
+     }
  
-                 for (Long groupVMId : groupVMIds) {
-                     VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId);
-                     if (groupVM != null && !groupVM.isRemoved()) {
-                         if (groupVM.getHostId() != null) {
-                             avoid.addHost(groupVM.getHostId());
-                             if (logger.isDebugEnabled()) {
-                                 logger.debug("Added host " + 
groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is 
present on the host");
-                             }
-                         } else if 
(Arrays.asList(VirtualMachine.State.Starting, 
VirtualMachine.State.Stopped).contains(groupVM.getState()) && 
groupVM.getLastHostId() != null) {
-                             long secondsSinceLastUpdate = 
(DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 
1000;
-                             if (secondsSinceLastUpdate < 
_vmCapacityReleaseInterval) {
-                                 avoid.addHost(groupVM.getLastHostId());
-                                 if (logger.isDebugEnabled()) {
-                                     logger.debug("Added host " + 
groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() +
-                                         " is present on the host, in Stopped 
state but has reserved capacity");
-                                 }
-                             }
+     protected void processAffinityGroup(AffinityGroupVMMapVO vmGroupMapping, 
ExcludeList avoid, VirtualMachine vm) {
+         if (vmGroupMapping != null) {
+             AffinityGroupVO group = _affinityGroupDao.findById(vmGroupMapping.getAffinityGroupId());
+ 
 -            if (s_logger.isDebugEnabled()) {
 -                s_logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId());
++            if (logger.isDebugEnabled()) {
++                logger.debug("Processing affinity group " + group.getName() + " for VM Id: " + vm.getId());
+             }
+ 
+             List<Long> groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(group.getId());
+             groupVMIds.remove(vm.getId());
+ 
+             for (Long groupVMId : groupVMIds) {
+                 VMInstanceVO groupVM = _vmInstanceDao.findById(groupVMId);
+                 if (groupVM != null && !groupVM.isRemoved()) {
+                     if (groupVM.getHostId() != null) {
+                         avoid.addHost(groupVM.getHostId());
 -                        if (s_logger.isDebugEnabled()) {
 -                            s_logger.debug("Added host " + 
groupVM.getHostId() + " to avoid set, since VM " + groupVM.getId() + " is 
present on the host");
++                        if (logger.isDebugEnabled()) {
++                            logger.debug("Added host " + groupVM.getHostId() 
+ " to avoid set, since VM " + groupVM.getId() + " is present on the host");
+                         }
+                     }
+                 } else if (Arrays.asList(VirtualMachine.State.Starting, 
VirtualMachine.State.Stopped).contains(groupVM.getState()) && 
groupVM.getLastHostId() != null) {
+                     long secondsSinceLastUpdate = 
(DateUtil.currentGMTTime().getTime() - groupVM.getUpdateTime().getTime()) / 
1000;
+                     if (secondsSinceLastUpdate < _vmCapacityReleaseInterval) {
+                         avoid.addHost(groupVM.getLastHostId());
 -                        if (s_logger.isDebugEnabled()) {
 -                            s_logger.debug("Added host " + 
groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() +
++                        if (logger.isDebugEnabled()) {
++                            logger.debug("Added host " + 
groupVM.getLastHostId() + " to avoid set, since VM " + groupVM.getId() +
+                                     " is present on the host, in Stopped 
state but has reserved capacity");
                          }
                      }
                  }
@@@ -119,25 -141,35 +139,50 @@@
          VirtualMachine vm = vmProfile.getVirtualMachine();
  
          List<AffinityGroupVMMapVO> vmGroupMappings = _affinityGroupVMMapDao.findByVmIdType(vm.getId(), getType());
 -        if (CollectionUtils.isEmpty(vmGroupMappings)) {
 -            return true;
 +
 +        for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) {
 +            // if more than 1 VM's are present in the group then check for
 +            // conflict due to parallel deployment
 +            List<Long> groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(vmGroupMapping.getAffinityGroupId());
 +            groupVMIds.remove(vm.getId());
 +
 +            for (Long groupVMId : groupVMIds) {
 +                VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId);
 +                if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) {
 +                    if (logger.isDebugEnabled()) {
 +                        logger.debug("Planned destination for VM " + vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() +
 +                            " reserved on the same host " + plannedHostId);
 +                    }
 +                    return false;
 +                }
 +            }
          }
-         return true;
+ 
+         List<Long> affinityGroupIds = vmGroupMappings.stream().map(AffinityGroupVMMapVO::getAffinityGroupId).collect(Collectors.toList());
+         return Transaction.execute(new TransactionCallback<Boolean>() {
+             @Override
+             public Boolean doInTransaction(TransactionStatus status) {
+                 _affinityGroupDao.listByIds(affinityGroupIds, true);
+                 for (AffinityGroupVMMapVO vmGroupMapping : vmGroupMappings) {
+                     // if more than 1 VM's are present in the group then check for
+                     // conflict due to parallel deployment
+                     List<Long> groupVMIds = _affinityGroupVMMapDao.listVmIdsByAffinityGroup(vmGroupMapping.getAffinityGroupId());
+                     groupVMIds.remove(vm.getId());
+ 
+                     for (Long groupVMId : groupVMIds) {
+                         VMReservationVO vmReservation = _reservationDao.findByVmId(groupVMId);
+                         if (vmReservation != null && vmReservation.getHostId() != null && vmReservation.getHostId().equals(plannedHostId)) {
 -                            if (s_logger.isDebugEnabled()) {
 -                                s_logger.debug("Planned destination for VM " 
+ vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() +
++                            if (logger.isDebugEnabled()) {
++                                logger.debug("Planned destination for VM " + 
vm.getId() + " conflicts with an existing VM " + vmReservation.getVmId() +
+                                         " reserved on the same host " + 
plannedHostId);
+                             }
+                             return false;
+                         }
+                     }
+                 }
+                 return true;
+             }
+         });
      }
  
  }
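
As merged, check() uses the value-returning variant of the same pattern: it first scans current reservations outside the transaction for a quick reject, then repeats the check under Transaction.execute() after taking the group rows, with the callback's Boolean propagating out as the method result. Reduced to its skeleton (hasConflict() is a placeholder for the per-VM reservation checks above, not a method in this commit):

    // Skeleton of the value-returning transaction in check(); placeholder logic.
    return Transaction.execute(new TransactionCallback<Boolean>() {
        @Override
        public Boolean doInTransaction(TransactionStatus status) {
            _affinityGroupDao.listByIds(affinityGroupIds, true); // fetch/lock group rows
            return !hasConflict(); // placeholder for the reservation scan
        }
    });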
