This is an automated email from the ASF dual-hosted git repository.

dahn pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/cloudstack.git

commit e61f3bae4daf752b3d4cf23d1c4685e074d07c4d
Merge: 54926c324ff 3de1f8b4baf
Author: Daan Hoogland <d...@onecht.net>
AuthorDate: Mon Apr 29 11:37:40 2024 +0200

    Merge branch '4.19'

 .github/workflows/ci.yml                           |   1 +
 .../service/VolumeOrchestrationService.java        |   2 +-
 .../engine/orchestration/VolumeOrchestrator.java   |  11 +-
 .../upgrade/SystemVmTemplateRegistration.java      |  11 +-
 .../storage/volume/VolumeServiceImpl.java          |   2 +-
 .../hypervisor/kvm/resource/LibvirtVMDef.java      |   4 +
 .../hypervisor/kvm/resource/MigrateKVMAsync.java   |  45 +++-
 .../wrapper/LibvirtMigrateCommandWrapper.java      |  29 ++-
 .../kvm/resource/MigrateKVMAsyncTest.java          |  83 ++++++++
 .../wrapper/LibvirtMigrateCommandWrapperTest.java  |  78 ++++++-
 .../kvm/storage/LinstorStorageAdaptor.java         | 114 ++++++----
 .../java/com/cloud/api/query/QueryManagerImpl.java |   2 +-
 .../configuration/ConfigurationManagerImpl.java    |   2 +-
 .../main/java/com/cloud/vm/UserVmManagerImpl.java  |  10 +-
 test/integration/smoke/test_restore_vm.py          | 108 ++++++++++
 ui/src/views/compute/ReinstallVm.vue               |  18 +-
 .../main/java/com/cloud/utils/net/MacAddress.java  | 231 +++------------------
 .../java/com/cloud/utils/net/MacAddressTest.java   |  14 +-
 18 files changed, 487 insertions(+), 278 deletions(-)

diff --cc plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
index c24214dd68c,aebbaa4119d..b97cb666de0
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateCommandWrapper.java
@@@ -188,15 -191,17 +189,17 @@@ public final class LibvirtMigrateComman
              // migrateStorage's value should always only be associated with the initial state of mapMigrateStorage.
              final boolean migrateStorage = MapUtils.isNotEmpty(mapMigrateStorage);
              final boolean migrateStorageManaged = command.isMigrateStorageManaged();
+             Set<String> migrateDiskLabels = null;
  
              if (migrateStorage) {
 -                if (s_logger.isDebugEnabled()) {
 -                    s_logger.debug(String.format("Changing VM [%s] volumes during migration to host: [%s].", vmName, target));
 +                if (logger.isDebugEnabled()) {
 +                    logger.debug(String.format("Changing VM [%s] volumes during migration to host: [%s].", vmName, target));
                  }
                  xmlDesc = replaceStorage(xmlDesc, mapMigrateStorage, migrateStorageManaged);
 -                if (s_logger.isDebugEnabled()) {
 -                    s_logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc));
 +                if (logger.isDebugEnabled()) {
 +                    logger.debug(String.format("Changed VM [%s] XML configuration of used storage. New XML configuration is [%s].", vmName, xmlDesc));
                  }
+                 migrateDiskLabels = getMigrateStorageDeviceLabels(disks, mapMigrateStorage);
              }
  
              Map<String, DpdkTO> dpdkPortsMapping = command.getDpdkInterfaceMapping();
@@@ -364,6 -368,30 +367,30 @@@
          return new MigrateAnswer(command, result == null, result, null);
      }
  
+     /**
+      * Gets the disk labels (vda, vdb...) of the disks mapped for migration on mapMigrateStorage.
+      * @param diskDefinitions list of all the disksDefinitions of the VM.
+      * @param mapMigrateStorage map of the disks that should be migrated.
+      * @return set with the labels of the disks that should be migrated.
+      * */
+     protected Set<String> getMigrateStorageDeviceLabels(List<DiskDef> diskDefinitions, Map<String, MigrateCommand.MigrateDiskInfo> mapMigrateStorage) {
+         HashSet<String> setOfLabels = new HashSet<>();
 -        s_logger.debug(String.format("Searching for disk labels of disks [%s].", mapMigrateStorage.keySet()));
++        logger.debug("Searching for disk labels of disks [{}].", mapMigrateStorage.keySet());
+         for (String fileName : mapMigrateStorage.keySet()) {
+             for (DiskDef diskDef : diskDefinitions) {
+                 String diskPath = diskDef.getDiskPath();
+                 if (diskPath != null && diskPath.contains(fileName)) {
+                     setOfLabels.add(diskDef.getDiskLabel());
 -                    s_logger.debug(String.format("Found label [%s] for disk [%s].", diskDef.getDiskLabel(), fileName));
++                    logger.debug("Found label [{}] for disk [{}].", diskDef.getDiskLabel(), fileName);
+                     break;
+                 }
+             }
+         }
+ 
+         return setOfLabels;
+     }
+ 
+ 
      /**
       * Checks if the CPU shares are equal in the source host and destination host.
       *  <ul>
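
For reference, here is a minimal, self-contained sketch of the label-matching idea behind getMigrateStorageDeviceLabels, using plain path-to-label pairs in place of the real DiskDef and MigrateCommand.MigrateDiskInfo types (all names in the sketch are illustrative only, not the wrapper's API):

import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class DiskLabelMatchSketch {
    // For every volume path selected for migration, find the disk whose path
    // contains that file name and collect its device label (vda, vdb, ...).
    static Set<String> labelsToMigrate(Map<String, String> diskPathToLabel, Set<String> migratePaths) {
        Set<String> labels = new HashSet<>();
        for (String fileName : migratePaths) {
            for (Map.Entry<String, String> disk : diskPathToLabel.entrySet()) {
                if (disk.getKey() != null && disk.getKey().contains(fileName)) {
                    labels.add(disk.getValue());
                    break;
                }
            }
        }
        return labels;
    }

    public static void main(String[] args) {
        Map<String, String> disks = Map.of(
                "/mnt/pool1/volume-root", "vda",
                "/mnt/pool1/volume-data", "vdb");
        System.out.println(labelsToMigrate(disks, Set.of("volume-data"))); // prints [vdb]
    }
}

The collected labels are presumably what lets MigrateKVMAsync (also touched in this merge) limit the storage copy to the mapped disks during live migration.
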
diff --cc plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java
index c5537eb9dd8,3a327b158d2..0ab51dbf112
--- a/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java
+++ b/plugins/storage/volume/linstor/src/main/java/com/cloud/hypervisor/kvm/storage/LinstorStorageAdaptor.java
@@@ -296,16 -296,83 +301,83 @@@ public class LinstorStorageAdaptor impl
          return true;
      }
  
+     private boolean tryDisconnectLinstor(String volumePath, KVMStoragePool pool)
+     {
+         if (volumePath == null) {
+             return false;
+         }
+ 
 -        s_logger.debug("Linstor: Using storage pool: " + pool.getUuid());
++        logger.debug("Linstor: Using storage pool: " + pool.getUuid());
+         final DevelopersApi api = getLinstorAPI(pool);
+ 
+         Optional<ResourceWithVolumes> optRsc;
+         try
+         {
+             List<ResourceWithVolumes> resources = api.viewResources(
+                     Collections.singletonList(localNodeName),
+                     null,
+                     null,
+                     null,
+                     null,
+                     null);
+ 
+             optRsc = getResourceByPath(resources, volumePath);
+         } catch (ApiException apiEx) {
+             // couldn't query linstor controller
 -            s_logger.error(apiEx.getBestMessage());
++            logger.error(apiEx.getBestMessage());
+             return false;
+         }
+ 
+ 
+         if (optRsc.isPresent()) {
+             try {
+                 Resource rsc = optRsc.get();
+ 
+                 // if diskless resource remove it, in the worst case it will be transformed to a tiebreaker
+                 if (rsc.getFlags() != null &&
+                         rsc.getFlags().contains(ApiConsts.FLAG_DRBD_DISKLESS) &&
+                         !rsc.getFlags().contains(ApiConsts.FLAG_TIE_BREAKER)) {
+                     ApiCallRcList delAnswers = api.resourceDelete(rsc.getName(), localNodeName);
+                     logLinstorAnswers(delAnswers);
+                 }
+ 
+                 // remove allow-two-primaries
+                 ResourceDefinitionModify rdm = new ResourceDefinitionModify();
+                 rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
+                 ApiCallRcList answers = api.resourceDefinitionModify(rsc.getName(), rdm);
+                 if (answers.hasError()) {
 -                    s_logger.error(
++                    logger.error(
+                             String.format("Failed to remove 'allow-two-primaries' on %s: %s",
+                                     rsc.getName(), LinstorUtil.getBestErrorMessage(answers)));
+                     // do not fail here as removing allow-two-primaries property isn't fatal
+                 }
+             } catch (ApiException apiEx) {
 -                s_logger.error(apiEx.getBestMessage());
++                logger.error(apiEx.getBestMessage());
+                 // do not fail here as removing allow-two-primaries property or deleting diskless isn't fatal
+             }
+ 
+             return true;
+         }
+ 
 -        s_logger.warn("Linstor: Couldn't find resource for this path: " + volumePath);
++        logger.warn("Linstor: Couldn't find resource for this path: " + volumePath);
+         return false;
+     }
+ 
      @Override
      public boolean disconnectPhysicalDisk(String volumePath, KVMStoragePool pool)
      {
 -        s_logger.debug("Linstor: disconnectPhysicalDisk " + pool.getUuid() + ":" + volumePath);
 +        logger.debug("Linstor: disconnectPhysicalDisk {}:{}", pool.getUuid(), volumePath);
+         if (MapStorageUuidToStoragePool.containsValue(pool)) {
+             return tryDisconnectLinstor(volumePath, pool);
+         }
          return false;
      }
  
      @Override
      public boolean disconnectPhysicalDisk(Map<String, String> volumeToDisconnect)
      {
+         // as of now this is only relevant for iscsi targets
 -        s_logger.info("Linstor: disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) called?");
++        logger.info("Linstor: disconnectPhysicalDisk(Map<String, String> volumeToDisconnect) called?");
          return false;
      }
  
@@@ -330,49 -397,11 +402,11 @@@
          Optional<KVMStoragePool> optFirstPool = MapStorageUuidToStoragePool.values().stream().findFirst();
          if (optFirstPool.isPresent())
          {
 -            s_logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath);
 +            logger.debug("Linstor: disconnectPhysicalDiskByPath " + localPath);
              final KVMStoragePool pool = optFirstPool.get();
  
-             logger.debug("Linstor: Using storpool: " + pool.getUuid());
-             final DevelopersApi api = getLinstorAPI(pool);
- 
-             Optional<ResourceWithVolumes> optRsc;
-             try {
-                 List<ResourceWithVolumes> resources = api.viewResources(
-                         Collections.singletonList(localNodeName),
-                         null,
-                         null,
-                         null,
-                         null,
-                         null);
- 
-                 optRsc = getResourceByPath(resources, localPath);
-             } catch (ApiException apiEx) {
-                 // couldn't query linstor controller
-                 logger.error(apiEx.getBestMessage());
-                 return false;
-             }
- 
-             if (optRsc.isPresent()) {
-                 try {
-                     Resource rsc = optRsc.get();
-                     ResourceDefinitionModify rdm = new ResourceDefinitionModify();
-                     rdm.deleteProps(Collections.singletonList("DrbdOptions/Net/allow-two-primaries"));
-                     ApiCallRcList answers = api.resourceDefinitionModify(rsc.getName(), rdm);
-                     if (answers.hasError()) {
-                         logger.error(
-                                 String.format("Failed to remove 'allow-two-primaries' on %s: %s",
-                                         rsc.getName(), LinstorUtil.getBestErrorMessage(answers)));
-                         // do not fail here as removing allow-two-primaries property isn't fatal
-                     }
-                 } catch(ApiException apiEx){
-                     logger.error(apiEx.getBestMessage());
-                     // do not fail here as removing allow-two-primaries property isn't fatal
-                     return true;
-                 }
-             }
+             return tryDisconnectLinstor(localPath, pool);
          }
-         logger.info("Linstor: Couldn't find resource for this path: {}", localPath);
          return false;
      }
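
As a rough, hypothetical illustration of the refactor above (the real adaptor works with KVMStoragePool and the LINSTOR DevelopersApi), both disconnect entry points now delegate to one helper so the controller lookup, the diskless-resource delete and the "allow-two-primaries" cleanup live in a single place:

import java.util.Map;
import java.util.Optional;
import java.util.concurrent.ConcurrentHashMap;

class LinstorDisconnectSketch {
    // stand-in for MapStorageUuidToStoragePool; values are opaque pool handles here
    private final Map<String, Object> poolsByUuid = new ConcurrentHashMap<>();

    // disconnectPhysicalDisk(volumePath, pool): only act on pools this adaptor manages
    boolean disconnectPhysicalDisk(String volumePath, Object pool) {
        return poolsByUuid.containsValue(pool) && tryDisconnect(volumePath, pool);
    }

    // disconnectPhysicalDiskByPath(localPath): fall back to the first known pool
    boolean disconnectPhysicalDiskByPath(String localPath) {
        Optional<Object> firstPool = poolsByUuid.values().stream().findFirst();
        return firstPool.isPresent() && tryDisconnect(localPath, firstPool.get());
    }

    // single place for the LINSTOR queries and cleanup shown in the diff above
    private boolean tryDisconnect(String path, Object pool) {
        if (path == null) {
            return false;
        }
        // controller query, diskless delete and property cleanup would go here
        return true;
    }
}
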
  
diff --cc server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index a1e0e770ee6,e574d9887c3..a9d9ad09095
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@@ -7979,9 -7843,10 +7979,9 @@@ public class UserVmManagerImpl extends 
              }
          }
  
 -        DiskOffering diskOffering = rootDiskOfferingId != null ? _diskOfferingDao.findById(rootDiskOfferingId) : null;
          for (VolumeVO root : rootVols) {
              if ( !Volume.State.Allocated.equals(root.getState()) || newTemplateId != null ) {
-                 _volumeService.validateDestroyVolume(root, caller, expunge, false);
+                 _volumeService.validateDestroyVolume(root, caller, Volume.State.Allocated.equals(root.getState()) || expunge, false);
                  final UserVmVO userVm = vm;
                  Pair<UserVmVO, Volume> vmAndNewVol = Transaction.execute(new TransactionCallbackWithException<Pair<UserVmVO, Volume>, CloudRuntimeException>() {
                      @Override
@@@ -8044,13 -7909,8 +8044,13 @@@
  
                  // Detach, destroy and create the usage event for the old root volume.
                  _volsDao.detachVolume(root.getId());
-                 _volumeService.destroyVolume(root.getId(), caller, expunge, false);
+                 _volumeService.destroyVolume(root.getId(), caller, Volume.State.Allocated.equals(root.getState()) || expunge, false);
  
 +                if (currentTemplate.getId() != template.getId() && VirtualMachine.Type.User.equals(vm.type) && !VirtualMachineManager.ResourceCountRunningVMsonly.value()) {
 +                    ServiceOfferingVO serviceOffering = serviceOfferingDao.findById(vm.getId(), vm.getServiceOfferingId());
 +                    _resourceLimitMgr.updateVmResourceCountForTemplateChange(vm.getAccountId(), vm.isDisplay(), serviceOffering, currentTemplate, template);
 +                }
 +
                  // For VMware hypervisor since the old root volume is replaced by the new root volume, force expunge old root volume if it has been created in storage
                  if (vm.getHypervisorType() == HypervisorType.VMware) {
                      VolumeInfo volumeInStorage = volFactory.getVolume(root.getId());
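
A tiny, hypothetical helper (not code from this commit) that captures the new expunge decision for the old root volume during restore; presumably an Allocated root volume was never created on primary storage, so it is destroyed with expunge even when the API caller did not request expunge:

import com.cloud.storage.Volume;

class RestoreVmExpungeSketch {
    // mirrors the condition now passed to validateDestroyVolume and destroyVolume above
    static boolean expungeOldRoot(Volume.State rootState, boolean expungeRequested) {
        return Volume.State.Allocated.equals(rootState) || expungeRequested;
    }
}
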
diff --cc utils/src/main/java/com/cloud/utils/net/MacAddress.java
index 5a72c48dd69,76f3f6c24ac..7555c03c2c0
--- a/utils/src/main/java/com/cloud/utils/net/MacAddress.java
+++ b/utils/src/main/java/com/cloud/utils/net/MacAddress.java
@@@ -19,26 -19,19 +19,24 @@@
  
  package com.cloud.utils.net;
  
- import static com.cloud.utils.AutoCloseableUtil.closeAutoCloseable;
- 
- import java.io.BufferedReader;
- import java.io.File;
- import java.io.IOException;
- import java.io.InputStreamReader;
  import java.net.InetAddress;
+ import java.net.NetworkInterface;
+ import java.net.SocketException;
  import java.net.UnknownHostException;
+ import java.util.Collections;
  import java.util.Formatter;
 +
 +import org.apache.logging.log4j.Logger;
 +import org.apache.logging.log4j.LogManager;
+ import java.util.List;
  
  /**
   * This class retrieves the (first) MAC address for the machine is it is loaded on and stores it statically for retrieval.
   * It can also be used for formatting MAC addresses.
-  * copied fnd addpeted rom the public domain utility from John Burkard.
   **/
  public class MacAddress {
 +    protected static Logger LOGGER = LogManager.getLogger(MacAddress.class);
++
      private long _addr = 0;
  
      protected MacAddress() {
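
The new imports suggest MacAddress now reads the address from java.net.NetworkInterface instead of parsing OS-specific command output. A minimal sketch of that approach (assuming nothing about the exact logic now inside MacAddress):

import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;
import java.util.Formatter;

public class MacViaNetworkInterface {
    // Returns the hardware address of the first non-loopback interface that has one,
    // formatted as aa:bb:cc:dd:ee:ff, or null when no such interface exists.
    static String firstMac() throws SocketException {
        for (NetworkInterface nic : Collections.list(NetworkInterface.getNetworkInterfaces())) {
            byte[] hw = nic.getHardwareAddress();
            if (!nic.isLoopback() && hw != null && hw.length > 0) {
                StringBuilder sb = new StringBuilder();
                try (Formatter formatter = new Formatter(sb)) {
                    for (int i = 0; i < hw.length; i++) {
                        formatter.format("%02x%s", hw[i], i < hw.length - 1 ? ":" : "");
                    }
                }
                return sb.toString();
            }
        }
        return null;
    }

    public static void main(String[] args) throws SocketException {
        System.out.println(firstMac());
    }
}
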
