This is an automated email from the ASF dual-hosted git repository.

harikrishna pushed a commit to branch LiveStoragMigrationScaleIOMain
in repository https://gitbox.apache.org/repos/asf/cloudstack.git

commit 46f5df0db0c289390ae1df55d479af5e32e3b38b
Author: Harikrishna Patnala <harikrishna.patn...@gmail.com>
AuthorDate: Thu Apr 6 13:36:24 2023 +0530

    Formatting code
---
 .../LibvirtMigrateVolumeCommandWrapper.java        | 125 +++++++++++++++------
 .../com/cloud/storage/VolumeApiServiceImpl.java    |  54 ++++++---
 2 files changed, 127 insertions(+), 52 deletions(-)

diff --git 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
index a083061d227..a31cb765b59 100644
--- 
a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
+++ 
b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/wrapper/LibvirtMigrateVolumeCommandWrapper.java
@@ -51,6 +51,9 @@ import org.libvirt.DomainInfo;
 import org.libvirt.TypedParameter;
 import org.libvirt.TypedUlongParameter;
 import org.libvirt.LibvirtException;
+import org.libvirt.event.BlockJobListener;
+import org.libvirt.event.BlockJobStatus;
+import org.libvirt.event.BlockJobType;
 
 @ResourceWrapper(handles =  MigrateVolumeCommand.class)
 public final class LibvirtMigrateVolumeCommandWrapper extends 
CommandWrapper<MigrateVolumeCommand, Answer, LibvirtComputingResource> {
@@ -116,47 +119,11 @@ public final class LibvirtMigrateVolumeCommandWrapper 
extends CommandWrapper<Mig
             TypedParameter[] parameters = new TypedParameter[1];
             parameters[0] = parameter;
 
-            Domain finalDm = dm;
-            final Boolean[] copyStatus = {true};
             dm.blockCopy(destDiskLabel, diskdef.toString(), parameters, 
Domain.BlockCopyFlags.REUSE_EXT);
             LOGGER.info(String.format("Block copy has started for the volume 
%s : %s ", diskdef.getDiskLabel(), srcPath));
 
-            int timeBetweenTries = 1000; // Try more frequently (every sec) 
and return early if disk is found
-            int waitTimeInSec = command.getWait();
-            while (waitTimeInSec > 0) {
-                DomainBlockJobInfo blockJobInfo = 
dm.getBlockJobInfo(destDiskLabel, 0);
-                if (blockJobInfo != null) {
-                    LOGGER.debug(String.format("Volume %s : %s block copy 
progress: %s%% ", destDiskLabel, srcPath, 100 * (blockJobInfo.cur / 
blockJobInfo.end)));
-                    if (blockJobInfo.cur == blockJobInfo.end) {
-                        LOGGER.debug(String.format("Block copy completed for 
the volume %s : %s", destDiskLabel, srcPath));
-                        dm.blockJobAbort(destDiskLabel, 
Domain.BlockJobAbortFlags.PIVOT);
-                        break;
-                    }
-                } else {
-                    LOGGER.debug("Failed to get the block copy status, trying 
to abort the job");
-                    dm.blockJobAbort(destDiskLabel, 
Domain.BlockJobAbortFlags.ASYNC);
-                }
-                waitTimeInSec--;
-
-                try {
-                    Thread.sleep(timeBetweenTries);
-                } catch (Exception ex) {
-                    // don't do anything
-                }
-            }
+            return checkBlockJobStatus(command, dm, destDiskLabel, srcPath, 
destPath);
 
-            if (waitTimeInSec <= 0) {
-                String msg = "Block copy is taking long time, failing the job";
-                LOGGER.error(msg);
-                try {
-                    dm.blockJobAbort(destDiskLabel, 
Domain.BlockJobAbortFlags.ASYNC);
-                } catch (LibvirtException ex) {
-                    LOGGER.error("Migrate volume failed while aborting the 
block job due to " + ex.getMessage());
-                }
-                return new MigrateVolumeAnswer(command, false, msg, null);
-            }
-
-            return new MigrateVolumeAnswer(command, true, null, destPath);
         } catch (Exception e) {
             String msg = "Migrate volume failed due to " + e.toString();
             LOGGER.warn(msg, e);
@@ -179,6 +146,90 @@ public final class LibvirtMigrateVolumeCommandWrapper 
extends CommandWrapper<Mig
         }
     }
 
+    /**
+     * Polls the libvirt block-copy job on {@code diskLabel} until it converges or the
+     * command's wait time (in seconds) expires, then pivots the domain onto the copy.
+     *
+     * @param command   the migrate command, supplies the wait budget via {@code getWait()}
+     * @param dm        the libvirt domain owning the disk
+     * @param diskLabel the target device label of the block-copy job
+     * @param srcPath   source volume path, used for logging only
+     * @param destPath  destination volume path, returned on success
+     * @return a successful answer carrying {@code destPath}, or a failure answer on timeout
+     * @throws LibvirtException if querying or pivoting the job fails
+     */
+    private MigrateVolumeAnswer checkBlockJobStatus(MigrateVolumeCommand command, Domain dm, String diskLabel, String srcPath, String destPath) throws LibvirtException {
+        int timeBetweenTries = 1000; // poll every second so we can return as soon as the copy converges
+        int waitTimeInSec = command.getWait();
+        while (waitTimeInSec > 0) {
+            DomainBlockJobInfo blockJobInfo = dm.getBlockJobInfo(diskLabel, 0);
+            if (blockJobInfo != null) {
+                // Floating-point math with an end > 0 guard: the original integer division
+                // always reported 0% (or 100%) and divided by zero before the job sized itself.
+                double progress = (blockJobInfo.end > 0) ? (100.0 * blockJobInfo.cur / blockJobInfo.end) : 0.0;
+                LOGGER.debug(String.format("Volume %s : %s block copy progress: %.2f%% ", diskLabel, srcPath, progress));
+                // end > 0 guard: right after the job starts, cur == end == 0 and pivoting
+                // at that point would switch to an incomplete copy.
+                if (blockJobInfo.end > 0 && blockJobInfo.cur == blockJobInfo.end) {
+                    LOGGER.debug(String.format("Block copy completed for the volume %s : %s", diskLabel, srcPath));
+                    dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.PIVOT);
+                    break;
+                }
+            } else {
+                LOGGER.debug("Failed to get the block copy status, trying to abort the job");
+                dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
+            }
+            waitTimeInSec--;
+
+            try {
+                Thread.sleep(timeBetweenTries);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // preserve interrupt status instead of swallowing it
+            }
+        }
+
+        if (waitTimeInSec <= 0) {
+            String msg = "Block copy is taking long time, failing the job";
+            LOGGER.error(msg);
+            try {
+                dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
+            } catch (LibvirtException ex) {
+                LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage());
+            }
+            return new MigrateVolumeAnswer(command, false, msg, null);
+        }
+
+        return new MigrateVolumeAnswer(command, true, null, destPath);
+    }
+
+    /**
+     * Event-driven alternative to the polling status check: registers a libvirt
+     * {@link BlockJobListener} that pivots the copy job once it reaches READY state,
+     * while this thread waits up to the command's wait time for that to happen.
+     *
+     * @return a successful answer carrying {@code destPath} once the job pivots,
+     *         or a failure answer if the copy does not complete in time
+     * @throws LibvirtException if registering the listener or aborting the job fails
+     */
+    private MigrateVolumeAnswer checkBlockJobStatusUsingListener(MigrateVolumeCommand command, Domain dm, String diskLabel, String srcPath, String destPath) throws LibvirtException, InterruptedException {
+        // Written from the libvirt event thread and read from this thread; all access is
+        // synchronized on the array so the write is guaranteed to become visible. (The
+        // original plain Boolean[] element write had no cross-thread visibility guarantee,
+        // so this loop could spin until timeout without ever observing completion.)
+        final boolean[] copyStatus = {false};
+
+        BlockJobListener listener = new BlockJobListener() {
+            @Override
+            public void onEvent(Domain domain, String diskPath, BlockJobType type, BlockJobStatus status) {
+                LOGGER.info("waiting for the event");
+                if (type == BlockJobType.COPY && status == BlockJobStatus.READY) {
+                    try {
+                        domain.blockJobAbort(diskPath, Domain.BlockJobAbortFlags.PIVOT);
+                        synchronized (copyStatus) {
+                            copyStatus[0] = true;
+                        }
+                    } catch (LibvirtException e) {
+                        throw new RuntimeException(e);
+                    }
+                }
+            }
+        };
+        // NOTE(review): the listener is never deregistered; confirm whether the binding
+        // exposes a removal API, otherwise this leaks one listener per invocation.
+        dm.addBlockJobListener(listener);
+
+        int timeBetweenTries = 1000; // check once a second so completion is noticed promptly
+        int waitTimeInSec = command.getWait();
+        while (waitTimeInSec > 0) {
+            synchronized (copyStatus) {
+                if (copyStatus[0]) {
+                    break;
+                }
+            }
+            waitTimeInSec--;
+            try {
+                Thread.sleep(timeBetweenTries);
+            } catch (InterruptedException ex) {
+                Thread.currentThread().interrupt(); // preserve interrupt status instead of swallowing it
+            }
+            LOGGER.info("Waiting for the block copy to complete");
+        }
+
+        boolean succeeded;
+        synchronized (copyStatus) {
+            succeeded = copyStatus[0];
+        }
+        if (!succeeded) {
+            String msg = "Block copy is taking long time, failing the job";
+            LOGGER.error(msg);
+            try {
+                dm.blockJobAbort(diskLabel, Domain.BlockJobAbortFlags.ASYNC);
+            } catch (LibvirtException ex) {
+                LOGGER.error("Migrate volume failed while aborting the block job due to " + ex.getMessage());
+            }
+            return new MigrateVolumeAnswer(command, false, msg, null);
+        }
+
+        return new MigrateVolumeAnswer(command, true, null, destPath);
+    }
+
     private LibvirtVMDef.DiskDef generateDestinationDiskDefinition(Domain dm, 
String srcVolumeId, String srcPath, String diskFilePath) throws 
InternalErrorException, LibvirtException {
         final LibvirtDomainXMLParser parser = new LibvirtDomainXMLParser();
         final String domXml = dm.getXMLDesc(0);
diff --git a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java 
b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
index 87af524d0f7..2711f6ecd8d 100644
--- a/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
+++ b/server/src/main/java/com/cloud/storage/VolumeApiServiceImpl.java
@@ -88,12 +88,7 @@ import org.apache.cloudstack.storage.command.AttachAnswer;
 import org.apache.cloudstack.storage.command.AttachCommand;
 import org.apache.cloudstack.storage.command.DettachCommand;
 import org.apache.cloudstack.storage.command.TemplateOrVolumePostUploadCommand;
-import org.apache.cloudstack.storage.datastore.db.PrimaryDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.SnapshotDataStoreVO;
-import org.apache.cloudstack.storage.datastore.db.StoragePoolVO;
-import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreDao;
-import org.apache.cloudstack.storage.datastore.db.VolumeDataStoreVO;
+import org.apache.cloudstack.storage.datastore.db.*;
 import org.apache.cloudstack.storage.image.datastore.ImageStoreEntity;
 import org.apache.cloudstack.utils.bytescale.ByteScaleUtils;
 import org.apache.cloudstack.utils.identity.ManagementServerNode;
@@ -157,12 +152,7 @@ import com.cloud.service.ServiceOfferingVO;
 import com.cloud.service.dao.ServiceOfferingDao;
 import com.cloud.service.dao.ServiceOfferingDetailsDao;
 import com.cloud.storage.Storage.ImageFormat;
-import com.cloud.storage.dao.DiskOfferingDao;
-import com.cloud.storage.dao.SnapshotDao;
-import com.cloud.storage.dao.StoragePoolTagsDao;
-import com.cloud.storage.dao.VMTemplateDao;
-import com.cloud.storage.dao.VolumeDao;
-import com.cloud.storage.dao.VolumeDetailsDao;
+import com.cloud.storage.dao.*;
 import com.cloud.storage.snapshot.SnapshotApiService;
 import com.cloud.storage.snapshot.SnapshotManager;
 import com.cloud.template.TemplateManager;
@@ -326,6 +316,8 @@ public class VolumeApiServiceImpl extends ManagerBase 
implements VolumeApiServic
 
     @Inject
     protected ProjectManager projectManager;
+    @Inject
+    protected StoragePoolDetailsDao storagePoolDetailsDao;
 
     protected Gson _gson;
 
@@ -1098,8 +1090,8 @@ public class VolumeApiServiceImpl extends ManagerBase 
implements VolumeApiServic
                 if (isNotPossibleToResize(volume, diskOffering)) {
                     throw new InvalidParameterValueException(
                             "Failed to resize Root volume. The service 
offering of this Volume has been configured with a root disk size; "
-                                    + "on such case a Root Volume can only be 
resized when changing to another Service Offering with a Root disk size. "
-                                    + "For more details please check out the 
Official Resizing Volumes documentation.");
+                            + "on such case a Root Volume can only be resized when changing to another Service Offering with a Root disk size. "
+                            + "For more details please check out the Official Resizing Volumes documentation.");
                 }
 
                 // convert from bytes to GiB
@@ -1246,7 +1238,7 @@ public class VolumeApiServiceImpl extends ManagerBase 
implements VolumeApiServic
              */
             if (currentSize > newSize && !shrinkOk) {
                 throw new InvalidParameterValueException("Going from existing 
size of " + currentSize + " to size of " + newSize + " would shrink the volume."
-                        + "Need to sign off by supplying the shrinkok 
parameter with value of true.");
+                     + "Need to sign off by supplying the shrinkok parameter with value of true.");
             }
 
             if (newSize > currentSize) {
@@ -3019,6 +3011,14 @@ public class VolumeApiServiceImpl extends ManagerBase 
implements VolumeApiServic
             }
         }
 
+        // Offline volume migration check for scaleIO volumes across scaleio 
clusters
+        if (vm == null || !State.Running.equals(vm.getState())) {
+            StoragePoolVO sourceStoragePoolVO = 
_storagePoolDao.findById(vol.getPoolId());
+            if 
(sourceStoragePoolVO.getPoolType().equals(Storage.StoragePoolType.PowerFlex) && 
isScaleIOVolumeOnDifferentScaleIOStorageInstances(vol.getPoolId(), 
storagePoolId)) {
+                throw new InvalidParameterValueException("Volume needs to be 
attached to a VM to move across ScaleIO storages in different ScaleIO 
clusters");
+            }
+        }
+
         if (vm != null &&
                 HypervisorType.VMware.equals(vm.getHypervisorType()) &&
                 State.Stopped.equals(vm.getState())) {
@@ -3152,6 +3152,30 @@ public class VolumeApiServiceImpl extends ManagerBase 
implements VolumeApiServic
         return orchestrateMigrateVolume(vol, destPool, liveMigrateVolume, 
newDiskOffering);
     }
 
+    /**
+     * Returns true when the two PowerFlex/ScaleIO pools belong to DIFFERENT ScaleIO
+     * storage clusters, i.e. their stored system-id pool details do not match.
+     *
+     * @param srcPoolId  id of the volume's current storage pool
+     * @param destPoolId id of the proposed destination storage pool
+     * @throws CloudRuntimeException if either pool is missing its system-id detail,
+     *         since compatibility cannot be validated without it
+     */
+    private boolean isScaleIOVolumeOnDifferentScaleIOStorageInstances(long srcPoolId, long destPoolId) {
+        String srcPoolSystemId = null;
+        StoragePoolDetailVO srcPoolSystemIdDetail = storagePoolDetailsDao.findDetail(srcPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
+        if (srcPoolSystemIdDetail != null) {
+            srcPoolSystemId = srcPoolSystemIdDetail.getValue();
+        }
+
+        String destPoolSystemId = null;
+        StoragePoolDetailVO destPoolSystemIdDetail = storagePoolDetailsDao.findDetail(destPoolId, ScaleIOGatewayClient.STORAGE_POOL_SYSTEM_ID);
+        if (destPoolSystemIdDetail != null) {
+            destPoolSystemId = destPoolSystemIdDetail.getValue();
+        }
+
+        if (StringUtils.isAnyEmpty(srcPoolSystemId, destPoolSystemId)) {
+            throw new CloudRuntimeException("Failed to validate PowerFlex pools compatibility for migration as storage instance details are not available");
+        }
+
+        // BUG FIX: this method must report "different instances". The original returned
+        // true when the system ids were EQUAL, inverting the check so the caller rejected
+        // offline migrations within the same ScaleIO cluster and allowed cross-cluster ones.
+        return !srcPoolSystemId.equals(destPoolSystemId);
+    }
+
     private boolean isSourceOrDestNotOnStorPool(StoragePoolVO storagePoolVO, 
StoragePoolVO destinationStoragePoolVo) {
         return storagePoolVO.getPoolType() != Storage.StoragePoolType.StorPool
                 || destinationStoragePoolVo.getPoolType() != 
Storage.StoragePoolType.StorPool;

Reply via email to