This is an automated email from the ASF dual-hosted git repository.
weizhou pushed a commit to branch 4.20
in repository https://gitbox.apache.org/repos/asf/cloudstack.git
The following commit(s) were added to refs/heads/4.20 by this push:
new 6d16ac2113a ScaleIO/PowerFlex smoke tests improvements, and some fixes (#11554)
6d16ac2113a is described below
commit 6d16ac2113a8e8d7b02458c655329574519585d2
Author: Suresh Kumar Anaparti <[email protected]>
AuthorDate: Fri Sep 12 19:47:20 2025 +0530
ScaleIO/PowerFlex smoke tests improvements, and some fixes (#11554)
* ScaleIO/PowerFlex smoke tests improvements, and some fixes
* Fix test_volumes.py, encrypted volume size check (for powerflex volumes)
* Fix test_over_provisioning.py (over provisioning supported for powerflex)
* Update vm snapshot tests
* Update volume size delta in primary storage resource count for user vm volumes only
  Previously this path also updated the resource count for PowerFlex-backed VR volumes,
  causing a resource count discrepancy (the count is re-calculated later by
  ResourceCountCheckTask, which skips the VR volumes)
* Fix test_import_unmanage_volumes.py (unsupported for powerflex)
* Fix test_sharedfs_lifecycle.py (volume size check for powerflex)
* Update powerflex.connect.on.demand config default to true
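Several of the test fixes above rely on the same size expectation: PowerFlex provisions volumes in 8 GiB increments, so the updated tests round the requested size up before comparing it with the reported volume size. A minimal sketch of that rounding, as used throughout the modified tests (the helper name is illustrative, not part of the patch):

    import math

    def powerflex_expected_size_gib(requested_gib):
        # PowerFlex allocates volumes in 8 GiB granularity, so round the
        # requested size up to the next multiple of 8 GiB
        return int(math.ceil(requested_gib / 8) * 8)

    # e.g. a 2 GiB request is provisioned as 8 GiB, a 9 GiB request as 16 GiB
    assert powerflex_expected_size_gib(2) == 8
    assert powerflex_expected_size_gib(9) == 16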
---
.../engine/orchestration/VolumeOrchestrator.java | 25 +++++----
.../kvm/resource/LibvirtComputingResource.java | 14 ++++-
.../cloudstack/metrics/MetricsServiceImpl.java | 2 +-
.../driver/ScaleIOPrimaryDataStoreDriver.java | 6 +--
.../datastore/manager/ScaleIOSDCManagerImpl.java | 2 +-
.../resourcelimit/ResourceLimitManagerImpl.java | 6 +--
.../main/java/com/cloud/server/StatsCollector.java | 2 +-
.../cloud/storage/snapshot/SnapshotManager.java | 2 +-
.../main/java/com/cloud/vm/UserVmManagerImpl.java | 2 +-
.../smoke/test_deploy_vm_root_resize.py | 9 ++++
.../smoke/test_import_unmanage_volumes.py | 22 +++++++-
test/integration/smoke/test_over_provisioning.py | 8 ++-
test/integration/smoke/test_restore_vm.py | 40 +++++++++++---
test/integration/smoke/test_sharedfs_lifecycle.py | 20 +++++--
test/integration/smoke/test_snapshots.py | 52 ++++++++++++++----
test/integration/smoke/test_usage.py | 27 ++++++++--
test/integration/smoke/test_vm_autoscaling.py | 36 +++++++++++--
test/integration/smoke/test_vm_life_cycle.py | 4 +-
test/integration/smoke/test_vm_snapshot_kvm.py | 61 +++++++++++++---------
test/integration/smoke/test_vm_snapshots.py | 24 +++++++--
test/integration/smoke/test_volumes.py | 51 +++++++++++++++---
tools/marvin/marvin/lib/utils.py | 53 ++++++++++++++++++-
22 files changed, 375 insertions(+), 93 deletions(-)
diff --git a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
index db0119febde..a6a43388665 100644
--- a/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
+++ b/engine/orchestration/src/main/java/org/apache/cloudstack/engine/orchestration/VolumeOrchestrator.java
@@ -177,6 +177,7 @@ import com.cloud.vm.dao.SecondaryStorageVmDao;
import com.cloud.vm.dao.UserVmCloneSettingDao;
import com.cloud.vm.dao.UserVmDao;
import com.cloud.vm.dao.UserVmDetailsDao;
+import com.cloud.vm.dao.VMInstanceDao;
public class VolumeOrchestrator extends ManagerBase implements
VolumeOrchestrationService, Configurable {
@@ -257,6 +258,8 @@ public class VolumeOrchestrator extends ManagerBase
implements VolumeOrchestrati
StoragePoolHostDao storagePoolHostDao;
@Inject
DiskOfferingDao diskOfferingDao;
+ @Inject
+ VMInstanceDao vmInstanceDao;
@Inject
protected SnapshotHelper snapshotHelper;
@@ -933,9 +936,7 @@ public class VolumeOrchestrator extends ManagerBase
implements VolumeOrchestrati
// Create event and update resource count for volumes if vm is a user
vm
if (vm.getType() == VirtualMachine.Type.User) {
-
Long offeringId = null;
-
if (!offering.isComputeOnly()) {
offeringId = offering.getId();
}
@@ -1868,14 +1869,18 @@ public class VolumeOrchestrator extends ManagerBase
implements VolumeOrchestrati
if (newSize != vol.getSize()) {
DiskOfferingVO diskOffering =
diskOfferingDao.findByIdIncludingRemoved(vol.getDiskOfferingId());
- if (newSize > vol.getSize()) {
-
_resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
- vol.isDisplay(), newSize - vol.getSize(),
diskOffering);
-
_resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(),
vol.isDisplay(),
- newSize - vol.getSize(), diskOffering);
- } else {
-
_resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(),
vol.isDisplay(),
- vol.getSize() - newSize, diskOffering);
+ VMInstanceVO vm = vol.getInstanceId() != null ?
vmInstanceDao.findById(vol.getInstanceId()) : null;
+ if (vm == null || vm.getType() == VirtualMachine.Type.User) {
+ // Update resource count for user vm volumes when volume is
attached
+ if (newSize > vol.getSize()) {
+
_resourceLimitMgr.checkPrimaryStorageResourceLimit(_accountMgr.getActiveAccountById(vol.getAccountId()),
+ vol.isDisplay(), newSize - vol.getSize(),
diskOffering);
+
_resourceLimitMgr.incrementVolumePrimaryStorageResourceCount(vol.getAccountId(),
vol.isDisplay(),
+ newSize - vol.getSize(), diskOffering);
+ } else {
+
_resourceLimitMgr.decrementVolumePrimaryStorageResourceCount(vol.getAccountId(),
vol.isDisplay(),
+ vol.getSize() - newSize, diskOffering);
+ }
}
vol.setSize(newSize);
_volsDao.persist(vol);
diff --git a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
index 2d63a669661..a5aba34a031 100644
--- a/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
+++ b/plugins/hypervisors/kvm/src/main/java/com/cloud/hypervisor/kvm/resource/LibvirtComputingResource.java
@@ -3101,7 +3101,7 @@ public class LibvirtComputingResource extends
ServerResourceBase implements Serv
}
if (vmSpec.getOs().toLowerCase().contains("window")) {
- isWindowsTemplate =true;
+ isWindowsTemplate = true;
}
for (final DiskTO volume : disks) {
KVMPhysicalDisk physicalDisk = null;
@@ -3220,6 +3220,9 @@ public class LibvirtComputingResource extends
ServerResourceBase implements Serv
disk.defNetworkBasedDisk(physicalDisk.getPath().replace("rbd:", ""),
pool.getSourceHost(), pool.getSourcePort(), pool.getAuthUserName(),
pool.getUuid(), devId, diskBusType,
DiskProtocol.RBD, DiskDef.DiskFmtType.RAW);
} else if (pool.getType() == StoragePoolType.PowerFlex) {
+ if (isWindowsTemplate && isUefiEnabled) {
+ diskBusTypeData = DiskDef.DiskBus.SATA;
+ }
disk.defBlockBasedDisk(physicalDisk.getPath(), devId,
diskBusTypeData);
if
(physicalDisk.getFormat().equals(PhysicalDiskFormat.QCOW2)) {
disk.setDiskFormatType(DiskDef.DiskFmtType.QCOW2);
@@ -3250,7 +3253,6 @@ public class LibvirtComputingResource extends
ServerResourceBase implements Serv
disk.defFileBasedDisk(physicalDisk.getPath(),
devId, diskBusType, DiskDef.DiskFmtType.QCOW2);
}
}
-
}
pool.customizeLibvirtDiskDef(disk);
}
@@ -4527,6 +4529,14 @@ public class LibvirtComputingResource extends
ServerResourceBase implements Serv
return token[1];
}
} else if (token.length > 3) {
+ // for powerflex/scaleio, path =
/dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003
+ if (token.length > 4 && StringUtils.isNotBlank(token[4]) &&
token[4].startsWith("emc-vol-")) {
+ final String[] emcVolToken = token[4].split("-");
+ if (emcVolToken.length == 4) {
+ return emcVolToken[3];
+ }
+ }
+
// for example, path = /mnt/pool_uuid/disk_path/
return token[3];
}
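The hunk above derives the PowerFlex/ScaleIO volume id from the device path reported by the host. A small Python sketch of the same tokenization, using the example path from the code comment (the helper name is illustrative):

    def powerflex_volume_id(path):
        # /dev/disk/by-id/emc-vol-<system-id>-<volume-id> splits into
        # ['', 'dev', 'disk', 'by-id', 'emc-vol-...'] on '/'
        token = path.split("/")
        if len(token) > 4 and token[4].startswith("emc-vol-"):
            emc_vol_token = token[4].split("-")
            if len(emc_vol_token) == 4:
                return emc_vol_token[3]
        return None

    assert powerflex_volume_id(
        "/dev/disk/by-id/emc-vol-2202eefc4692120f-540fd8fa00000003") == "540fd8fa00000003"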
diff --git a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
index 3cd6bd33837..0ef094d3d4e 100644
--- a/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
+++ b/plugins/metrics/src/main/java/org/apache/cloudstack/metrics/MetricsServiceImpl.java
@@ -234,7 +234,7 @@ public class MetricsServiceImpl extends
MutualExclusiveIdsManagerBase implements
@Override
public ListResponse<VolumeMetricsStatsResponse>
searchForVolumeMetricsStats(ListVolumesUsageHistoryCmd cmd) {
Pair<List<VolumeVO>, Integer> volumeList =
searchForVolumesInternal(cmd);
- Map<Long,List<VolumeStatsVO>> volumeStatsList =
searchForVolumeMetricsStatsInternal(cmd, volumeList.first());
+ Map<Long, List<VolumeStatsVO>> volumeStatsList =
searchForVolumeMetricsStatsInternal(cmd, volumeList.first());
return createVolumeMetricsStatsResponse(volumeList, volumeStatsList);
}
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
index 3d2ca5b1d09..7eb106ef9f8 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/driver/ScaleIOPrimaryDataStoreDriver.java
@@ -571,8 +571,8 @@ public class ScaleIOPrimaryDataStoreDriver implements
PrimaryDataStoreDriver {
}
}
} else {
- logger.debug("No encryption configured for data volume [id:
{}, uuid: {}, name: {}]",
- volumeInfo.getId(), volumeInfo.getUuid(),
volumeInfo.getName());
+ logger.debug("No encryption configured for volume [id: {},
uuid: {}, name: {}]",
+ volumeInfo.getId(), volumeInfo.getUuid(),
volumeInfo.getName());
}
return answer;
@@ -1512,7 +1512,7 @@ public class ScaleIOPrimaryDataStoreDriver implements
PrimaryDataStoreDriver {
* @return true if resize is required
*/
private boolean needsExpansionForEncryptionHeader(long srcSize, long
dstSize) {
- int headerSize = 32<<20; // ensure we have 32MiB for encryption header
+ int headerSize = 32 << 20; // ensure we have 32MiB for encryption
header
return srcSize + headerSize > dstSize;
}
diff --git a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
index 5f098badaa1..8ec64802ee2 100644
--- a/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
+++ b/plugins/storage/volume/scaleio/src/main/java/org/apache/cloudstack/storage/datastore/manager/ScaleIOSDCManagerImpl.java
@@ -61,7 +61,7 @@ public class ScaleIOSDCManagerImpl implements
ScaleIOSDCManager, Configurable {
static ConfigKey<Boolean> ConnectOnDemand = new ConfigKey<>("Storage",
Boolean.class,
"powerflex.connect.on.demand",
- Boolean.FALSE.toString(),
+ Boolean.TRUE.toString(),
"Connect PowerFlex client on Host when first Volume is mapped to
SDC and disconnect when last Volume is unmapped from SDC," +
" otherwise no action (that is connection remains in the
same state whichever it is, connected or disconnected).",
Boolean.TRUE,
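With the default flipped to true, SDC connections are now managed on demand out of the box. Operators who prefer the previous behaviour can set the global setting back explicitly; a hedged Marvin sketch, assuming `apiclient` is an already-initialized API client and mirroring the Configurations.update calls used elsewhere in these smoke tests:

    from marvin.lib.base import Configurations

    # revert to the previous behaviour of leaving SDC connections untouched
    Configurations.update(apiclient,
                          name="powerflex.connect.on.demand",
                          value="false")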
diff --git a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java
index 85cca63546c..b890b72f758 100644
--- a/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java
+++ b/server/src/main/java/com/cloud/resourcelimit/ResourceLimitManagerImpl.java
@@ -511,7 +511,7 @@ public class ResourceLimitManagerImpl extends ManagerBase
implements ResourceLim
String convCurrentResourceReservation =
String.valueOf(currentResourceReservation);
String convNumResources = String.valueOf(numResources);
- if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage){
+ if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage) {
convDomainResourceLimit =
toHumanReadableSize(domainResourceLimit);
convCurrentDomainResourceCount =
toHumanReadableSize(currentDomainResourceCount);
convCurrentResourceReservation =
toHumanReadableSize(currentResourceReservation);
@@ -554,7 +554,7 @@ public class ResourceLimitManagerImpl extends ManagerBase
implements ResourceLim
String convertedCurrentResourceReservation =
String.valueOf(currentResourceReservation);
String convertedNumResources = String.valueOf(numResources);
- if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage){
+ if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage) {
convertedAccountResourceLimit =
toHumanReadableSize(accountResourceLimit);
convertedCurrentResourceCount =
toHumanReadableSize(currentResourceCount);
convertedCurrentResourceReservation =
toHumanReadableSize(currentResourceReservation);
@@ -1137,7 +1137,7 @@ public class ResourceLimitManagerImpl extends ManagerBase
implements ResourceLim
}
if (logger.isDebugEnabled()) {
String convertedDelta = String.valueOf(delta);
- if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage){
+ if (type == ResourceType.secondary_storage || type ==
ResourceType.primary_storage) {
convertedDelta = toHumanReadableSize(delta);
}
String typeStr = StringUtils.isNotEmpty(tag) ? String.format("%s
(tag: %s)", type, tag) : type.getName();
diff --git a/server/src/main/java/com/cloud/server/StatsCollector.java b/server/src/main/java/com/cloud/server/StatsCollector.java
index 27ac0bb725d..a32dac398a8 100644
--- a/server/src/main/java/com/cloud/server/StatsCollector.java
+++ b/server/src/main/java/com/cloud/server/StatsCollector.java
@@ -1459,7 +1459,7 @@ public class StatsCollector extends ManagerBase
implements ComponentMethodInterc
for (VmDiskStats vmDiskStat : vmDiskStats) {
VmDiskStatsEntry vmDiskStatEntry =
(VmDiskStatsEntry)vmDiskStat;
SearchCriteria<VolumeVO> sc_volume =
_volsDao.createSearchCriteria();
- sc_volume.addAnd("path",
SearchCriteria.Op.EQ, vmDiskStatEntry.getPath());
+ sc_volume.addAnd("path",
SearchCriteria.Op.LIKE, vmDiskStatEntry.getPath() + "%");
List<VolumeVO> volumes =
_volsDao.search(sc_volume, null);
if (CollectionUtils.isEmpty(volumes))
diff --git a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java
index cce580d4106..329ed9bc710 100644
--- a/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java
+++ b/server/src/main/java/com/cloud/storage/snapshot/SnapshotManager.java
@@ -57,7 +57,7 @@ public interface SnapshotManager extends Configurable {
public static final ConfigKey<Integer> BackupRetryInterval = new
ConfigKey<Integer>(Integer.class, "backup.retry.interval", "Advanced", "300",
"Time in seconds between retries in backing up snapshot to
secondary", false, ConfigKey.Scope.Global, null);
- public static final ConfigKey<Boolean> VmStorageSnapshotKvm = new
ConfigKey<>(Boolean.class, "kvm.vmstoragesnapshot.enabled", "Snapshots",
"false", "For live snapshot of virtual machine instance on KVM hypervisor
without memory. Requieres qemu version 1.6+ (on NFS or Local file system) and
qemu-guest-agent installed on guest VM", true, ConfigKey.Scope.Global, null);
+ public static final ConfigKey<Boolean> VmStorageSnapshotKvm = new
ConfigKey<>(Boolean.class, "kvm.vmstoragesnapshot.enabled", "Snapshots",
"false", "For live snapshot of virtual machine instance on KVM hypervisor
without memory. Requires qemu version 1.6+ (on NFS or Local file system) and
qemu-guest-agent installed on guest VM", true, ConfigKey.Scope.Global, null);
void deletePoliciesForVolume(Long volumeId);
diff --git a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
index 81b652169bf..b8710943164 100644
--- a/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
+++ b/server/src/main/java/com/cloud/vm/UserVmManagerImpl.java
@@ -5860,7 +5860,7 @@ public class UserVmManagerImpl extends ManagerBase
implements UserVmManager, Vir
for (VmDiskStatsEntry vmDiskStat : vmDiskStats) {
SearchCriteria<VolumeVO> sc_volume =
_volsDao.createSearchCriteria();
- sc_volume.addAnd("path", SearchCriteria.Op.EQ,
vmDiskStat.getPath());
+ sc_volume.addAnd("path", SearchCriteria.Op.LIKE,
vmDiskStat.getPath() + "%");
List<VolumeVO> volumes =
_volsDao.search(sc_volume, null);
if ((volumes == null) || (volumes.size() == 0)) {
break;
diff --git a/test/integration/smoke/test_deploy_vm_root_resize.py b/test/integration/smoke/test_deploy_vm_root_resize.py
index 1ef5d7d6ea6..b9d14e5bdca 100644
--- a/test/integration/smoke/test_deploy_vm_root_resize.py
+++ b/test/integration/smoke/test_deploy_vm_root_resize.py
@@ -32,6 +32,7 @@ from marvin.codes import FAILED, INVALID_INPUT, PASS,\
RESOURCE_PRIMARY_STORAGE
from nose.plugins.attrib import attr
from marvin.sshClient import SshClient
+import math
import time
import re
from marvin.cloudstackAPI import updateTemplate,registerTemplate
@@ -276,6 +277,14 @@ class TestDeployVmRootSize(cloudstackTestCase):
self.assertNotEqual(res[2], INVALID_INPUT, "Invalid list VM "
"response")
rootvolume = list_volume_response[0]
+ list_volume_pool_response = list_storage_pools(
+ self.apiclient,
+ id=rootvolume.storageid
+ )
+ rootvolume_pool = list_volume_pool_response[0]
+ if rootvolume_pool.type.lower() == "powerflex":
+ newrootsize = (int(math.ceil(newrootsize / 8) * 8))
+
success = False
if rootvolume is not None and rootvolume.size == (newrootsize <<
30):
success = True
diff --git a/test/integration/smoke/test_import_unmanage_volumes.py b/test/integration/smoke/test_import_unmanage_volumes.py
index 9001e97a79e..fc1c558d70f 100644
--- a/test/integration/smoke/test_import_unmanage_volumes.py
+++ b/test/integration/smoke/test_import_unmanage_volumes.py
@@ -26,7 +26,11 @@ from marvin.lib.base import (Account,
ServiceOffering,
DiskOffering,
VirtualMachine)
-from marvin.lib.common import (get_domain, get_zone,
get_suitable_test_template)
+from marvin.lib.common import (get_domain,
+ get_zone,
+ get_suitable_test_template,
+ list_volumes,
+ list_storage_pools)
# Import System modules
from nose.plugins.attrib import attr
@@ -107,6 +111,22 @@ class TestImportAndUnmanageVolumes(cloudstackTestCase):
def test_01_detach_unmanage_import_volume(self):
"""Test attach/detach/unmanage/import volume
"""
+
+ volumes = list_volumes(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ type='ROOT',
+ listall=True
+ )
+ volume = volumes[0]
+ volume_pool_response = list_storage_pools(
+ self.apiclient,
+ id=volume.storageid
+ )
+ volume_pool = volume_pool_response[0]
+ if volume_pool.type.lower() == "powerflex":
+ self.skipTest("This test is not supported for storage pool type %s
on hypervisor KVM" % volume_pool.type)
+
# Create DATA volume
volume = Volume.create(
self.apiclient,
diff --git a/test/integration/smoke/test_over_provisioning.py b/test/integration/smoke/test_over_provisioning.py
index 94e4096b1ef..c2b1a5ac205 100644
--- a/test/integration/smoke/test_over_provisioning.py
+++ b/test/integration/smoke/test_over_provisioning.py
@@ -60,9 +60,10 @@ class TestUpdateOverProvision(cloudstackTestCase):
"The environment don't have storage pools required
for test")
for pool in storage_pools:
- if pool.type == "NetworkFilesystem" or pool.type == "VMFS":
+ if pool.type == "NetworkFilesystem" or pool.type == "VMFS" or
pool.type == "PowerFlex":
break
- if pool.type != "NetworkFilesystem" and pool.type != "VMFS":
+
+ if pool.type != "NetworkFilesystem" and pool.type != "VMFS" and
pool.type != "PowerFlex":
raise self.skipTest("Storage overprovisioning currently not
supported on " + pool.type + " pools")
self.poolId = pool.id
@@ -101,6 +102,9 @@ class TestUpdateOverProvision(cloudstackTestCase):
"""Reset the storage.overprovisioning.factor back to its original value
@return:
"""
+ if not hasattr(self, 'poolId'):
+ return
+
storage_pools = StoragePool.list(
self.apiClient,
id = self.poolId
diff --git a/test/integration/smoke/test_restore_vm.py b/test/integration/smoke/test_restore_vm.py
index 3798bef852a..b961bee39f2 100644
--- a/test/integration/smoke/test_restore_vm.py
+++ b/test/integration/smoke/test_restore_vm.py
@@ -16,10 +16,13 @@
# under the License.
""" P1 tests for Scaling up Vm
"""
+
+import math
+
# Import Local Modules
from marvin.cloudstackTestCase import cloudstackTestCase
from marvin.lib.base import (VirtualMachine, Volume, DiskOffering,
ServiceOffering, Template)
-from marvin.lib.common import (get_zone, get_domain)
+from marvin.lib.common import (get_zone, get_domain, list_storage_pools)
from nose.plugins.attrib import attr
_multiprocess_shared_ = True
@@ -78,8 +81,13 @@ class TestRestoreVM(cloudstackTestCase):
self._cleanup.append(virtual_machine)
old_root_vol = Volume.list(self.apiclient,
virtualmachineid=virtual_machine.id)[0]
+ old_root_vol_pool_res = list_storage_pools(self.apiclient,
id=old_root_vol.storageid)
+ old_root_vol_pool = old_root_vol_pool_res[0]
+ expected_old_root_vol_size = self.template_t1.size
+ if old_root_vol_pool.type.lower() == "powerflex":
+ expected_old_root_vol_size =
(int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 **
3)
self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in
Ready state")
- self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of
volume and template should match")
+ self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size
of volume and template should match")
virtual_machine.restore(self.apiclient, self.template_t2.id,
expunge=True)
@@ -88,8 +96,13 @@ class TestRestoreVM(cloudstackTestCase):
self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's
template after restore is incorrect")
root_vol = Volume.list(self.apiclient,
virtualmachineid=restored_vm.id)[0]
+ root_vol_pool_res = list_storage_pools(self.apiclient,
id=root_vol.storageid)
+ root_vol_pool = root_vol_pool_res[0]
+ expected_root_vol_size = self.template_t2.size
+ if root_vol_pool.type.lower() == "powerflex":
+ expected_root_vol_size = (int(math.ceil((expected_root_vol_size /
(1024 ** 3)) / 8) * 8)) * (1024 ** 3)
self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready
state")
- self.assertEqual(root_vol.size, self.template_t2.size, "Size of volume
and template should match")
+ self.assertEqual(root_vol.size, expected_root_vol_size, "Size of
volume and template should match")
old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
self.assertEqual(old_root_vol, None, "Old volume should be deleted")
@@ -105,8 +118,13 @@ class TestRestoreVM(cloudstackTestCase):
self._cleanup.append(virtual_machine)
old_root_vol = Volume.list(self.apiclient,
virtualmachineid=virtual_machine.id)[0]
+ old_root_vol_pool_res = list_storage_pools(self.apiclient,
id=old_root_vol.storageid)
+ old_root_vol_pool = old_root_vol_pool_res[0]
+ expected_old_root_vol_size = self.template_t1.size
+ if old_root_vol_pool.type.lower() == "powerflex":
+ expected_old_root_vol_size =
(int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 **
3)
self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in
Ready state")
- self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of
volume and template should match")
+ self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size
of volume and template should match")
virtual_machine.restore(self.apiclient, self.template_t2.id,
self.disk_offering.id, expunge=True)
@@ -115,9 +133,14 @@ class TestRestoreVM(cloudstackTestCase):
self.assertEqual(restored_vm.templateid, self.template_t2.id, "VM's
template after restore is incorrect")
root_vol = Volume.list(self.apiclient,
virtualmachineid=restored_vm.id)[0]
+ root_vol_pool_res = list_storage_pools(self.apiclient,
id=root_vol.storageid)
+ root_vol_pool = root_vol_pool_res[0]
+ expected_root_vol_size = self.disk_offering.disksize
+ if root_vol_pool.type.lower() == "powerflex":
+ expected_root_vol_size = (int(math.ceil(expected_root_vol_size /
8) * 8))
self.assertEqual(root_vol.diskofferingid, self.disk_offering.id, "Disk
offering id should match")
self.assertEqual(root_vol.state, 'Ready', "Volume should be in Ready
state")
- self.assertEqual(root_vol.size, self.disk_offering.disksize * 1024 *
1024 * 1024,
+ self.assertEqual(root_vol.size, expected_root_vol_size * 1024 * 1024 *
1024,
"Size of volume and disk offering should match")
old_root_vol = Volume.list(self.apiclient, id=old_root_vol.id)
@@ -134,8 +157,13 @@ class TestRestoreVM(cloudstackTestCase):
self._cleanup.append(virtual_machine)
old_root_vol = Volume.list(self.apiclient,
virtualmachineid=virtual_machine.id)[0]
+ old_root_vol_pool_res = list_storage_pools(self.apiclient,
id=old_root_vol.storageid)
+ old_root_vol_pool = old_root_vol_pool_res[0]
+ expected_old_root_vol_size = self.template_t1.size
+ if old_root_vol_pool.type.lower() == "powerflex":
+ expected_old_root_vol_size =
(int(math.ceil((expected_old_root_vol_size / (1024 ** 3)) / 8) * 8)) * (1024 **
3)
self.assertEqual(old_root_vol.state, 'Ready', "Volume should be in
Ready state")
- self.assertEqual(old_root_vol.size, self.template_t1.size, "Size of
volume and template should match")
+ self.assertEqual(old_root_vol.size, expected_old_root_vol_size, "Size
of volume and template should match")
virtual_machine.restore(self.apiclient, self.template_t2.id,
self.disk_offering.id, rootdisksize=16)
diff --git a/test/integration/smoke/test_sharedfs_lifecycle.py b/test/integration/smoke/test_sharedfs_lifecycle.py
index f4b2c2fc593..4daf0d7696a 100644
--- a/test/integration/smoke/test_sharedfs_lifecycle.py
+++ b/test/integration/smoke/test_sharedfs_lifecycle.py
@@ -38,7 +38,8 @@ from marvin.lib.base import (Account,
)
from marvin.lib.common import (get_domain,
get_zone,
- get_template)
+ get_template,
+ list_storage_pools)
from marvin.codes import FAILED
from marvin.lib.decoratorGenerators import skipTestIf
@@ -258,15 +259,23 @@ class TestSharedFSLifecycle(cloudstackTestCase):
def test_resize_shared_fs(self):
"""Resize the shared filesystem by changing the disk offering and
validate
"""
+ sharedfs_pool_response = list_storage_pools(self.apiclient,
id=self.sharedfs.storageid)
+ sharedfs_pool = sharedfs_pool_response[0]
+
self.mountSharedFSOnVM(self.vm1_ssh_client, self.sharedfs)
result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
self.debug(result)
size = result.split()[-5]
self.debug("Size of the filesystem is " + size)
- self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G")
+ if sharedfs_pool.type.lower() == "powerflex":
+ self.assertEqual(size, "8.0G", "SharedFS size should be 8.0G")
+ new_size = 9
+ else:
+ self.assertEqual(size, "2.0G", "SharedFS size should be 2.0G")
+ new_size = 3
response = SharedFS.stop(self.sharedfs, self.apiclient)
- response = SharedFS.changediskoffering(self.sharedfs, self.apiclient,
self.disk_offering.id, 3)
+ response = SharedFS.changediskoffering(self.sharedfs, self.apiclient,
self.disk_offering.id, new_size)
self.debug(response)
response = SharedFS.start(self.sharedfs, self.apiclient)
time.sleep(10)
@@ -274,4 +283,7 @@ class TestSharedFSLifecycle(cloudstackTestCase):
result = self.vm1_ssh_client.execute("df -Th /mnt/fs1 | grep nfs")[0]
size = result.split()[-5]
self.debug("Size of the filesystem is " + size)
- self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G")
+ if sharedfs_pool.type.lower() == "powerflex":
+ self.assertEqual(size, "16G", "SharedFS size should be 16G")
+ else:
+ self.assertEqual(size, "3.0G", "SharedFS size should be 3.0G")
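The new_size choices above follow the same 8 GiB granularity: on PowerFlex the 2 GiB shared filesystem is provisioned as 8 GiB, and a resize only becomes observable once it crosses the next 8 GiB boundary. A small illustrative check, assuming the ceil-to-8-GiB rounding used elsewhere in this patch:

    import math

    def pf_gib(requested_gib):
        return int(math.ceil(requested_gib / 8) * 8)

    assert pf_gib(2) == 8    # initial sharedfs reported as 8.0G on PowerFlex
    assert pf_gib(3) == 8    # resizing 2G -> 3G would not change the PowerFlex size
    assert pf_gib(9) == 16   # hence the test resizes to 9G, reported as 16G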
diff --git a/test/integration/smoke/test_snapshots.py b/test/integration/smoke/test_snapshots.py
index f8346093c64..b1a2569d969 100644
--- a/test/integration/smoke/test_snapshots.py
+++ b/test/integration/smoke/test_snapshots.py
@@ -18,8 +18,10 @@
from marvin.codes import FAILED
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
+from marvin.cloudstackException import CloudstackAPIException
from marvin.lib.utils import (cleanup_resources,
is_snapshot_on_nfs,
+ is_snapshot_on_powerflex,
validateList)
from marvin.lib.base import (VirtualMachine,
Account,
@@ -146,10 +148,16 @@ class TestSnapshotRootDisk(cloudstackTestCase):
type='ROOT',
listall=True
)
+ volume = volumes[0]
+ volume_pool_response = list_storage_pools(
+ self.apiclient,
+ id=volume.storageid
+ )
+ volume_pool = volume_pool_response[0]
snapshot = Snapshot.create(
self.apiclient,
- volumes[0].id,
+ volume.id,
account=self.account.name,
domainid=self.account.domainid
)
@@ -209,6 +217,11 @@ class TestSnapshotRootDisk(cloudstackTestCase):
"Check if backup_snap_id is not null"
)
+ if volume_pool.type.lower() == "powerflex":
+ self.assertTrue(is_snapshot_on_powerflex(
+ self.apiclient, self.dbclient, self.config, self.zone.id,
snapshot.id))
+ return
+
self.assertTrue(is_snapshot_on_nfs(
self.apiclient, self.dbclient, self.config, self.zone.id,
snapshot.id))
return
@@ -246,6 +259,11 @@ class TestSnapshotRootDisk(cloudstackTestCase):
PASS,
"Invalid response returned for list volumes")
vol_uuid = vol_res[0].id
+ volume_pool_response = list_storage_pools(self.apiclient,
+ id=vol_res[0].storageid)
+ volume_pool = volume_pool_response[0]
+ if volume_pool.type.lower() != 'networkfilesystem':
+ self.skipTest("This test is not supported for volume created on
storage pool type %s" % volume_pool.type)
clusters = list_clusters(
self.apiclient,
zoneid=self.zone.id
@@ -437,15 +455,16 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase):
)
cls._cleanup.append(cls.virtual_machine)
- volumes =Volume.list(
+ volumes = Volume.list(
cls.userapiclient,
virtualmachineid=cls.virtual_machine.id,
type='ROOT',
listall=True
)
+ cls.volume = volumes[0]
cls.snapshot = Snapshot.create(
cls.userapiclient,
- volumes[0].id,
+ cls.volume.id,
account=cls.account.name,
domainid=cls.account.domainid
)
@@ -475,13 +494,28 @@ class TestSnapshotStandaloneBackup(cloudstackTestCase):
"""Test creating volume from snapshot
"""
self.services['volume_from_snapshot']['zoneid'] = self.zone.id
- self.volume_from_snap = Volume.create_from_snapshot(
- self.userapiclient,
- snapshot_id=self.snapshot.id,
- services=self.services["volume_from_snapshot"],
- account=self.account.name,
- domainid=self.account.domainid
+ snapshot_volume_pool_response = list_storage_pools(
+ self.apiclient,
+ id=self.volume.storageid
)
+ snapshot_volume_pool = snapshot_volume_pool_response[0]
+ try:
+ self.volume_from_snap = Volume.create_from_snapshot(
+ self.userapiclient,
+ snapshot_id=self.snapshot.id,
+ services=self.services["volume_from_snapshot"],
+ account=self.account.name,
+ domainid=self.account.domainid
+ )
+ except CloudstackAPIException as cs:
+ self.debug(cs.errorMsg)
+ if snapshot_volume_pool.type.lower() == "powerflex":
+ self.assertTrue(
+ cs.errorMsg.find("Create volume from snapshot is not
supported for PowerFlex volume snapshots") > 0,
+ msg="Other than unsupported error while creating volume
from snapshot for volume on PowerFlex pool")
+ return
+ self.fail("Failed to create volume from snapshot: %s" % cs)
+
self.cleanup.append(self.volume_from_snap)
self.assertEqual(
diff --git a/test/integration/smoke/test_usage.py b/test/integration/smoke/test_usage.py
index 1a6ff37cedb..9ec5205403e 100644
--- a/test/integration/smoke/test_usage.py
+++ b/test/integration/smoke/test_usage.py
@@ -40,6 +40,7 @@ from marvin.lib.base import (Account,
from marvin.lib.common import (get_zone,
get_domain,
get_suitable_test_template,
+ list_storage_pools,
find_storage_pool_type)
@@ -611,17 +612,17 @@ class TestVolumeUsage(cloudstackTestCase):
except Exception as e:
self.fail("Failed to stop instance: %s" % e)
- volume_response = Volume.list(
+ data_volume_response = Volume.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
type='DATADISK',
listall=True)
self.assertEqual(
- isinstance(volume_response, list),
+ isinstance(data_volume_response, list),
True,
"Check for valid list volumes response"
)
- data_volume = volume_response[0]
+ data_volume = data_volume_response[0]
# Detach data Disk
self.debug("Detaching volume ID: %s VM with ID: %s" % (
@@ -769,7 +770,25 @@ class TestVolumeUsage(cloudstackTestCase):
"Running",
"VM state should be running after deployment"
)
- self.virtual_machine.attach_volume(self.apiclient,volume_uploaded)
+ root_volume_response = Volume.list(
+ self.apiclient,
+ virtualmachineid=self.virtual_machine.id,
+ type='ROOT',
+ listall=True)
+ root_volume = root_volume_response[0]
+ root_volume_pool_response = list_storage_pools(
+ self.apiclient,
+ id=root_volume.storageid
+ )
+ root_volume_pool = root_volume_pool_response[0]
+ try:
+ self.virtual_machine.attach_volume(self.apiclient,volume_uploaded)
+ except Exception as e:
+ self.debug("Exception %s: " % e)
+ if root_volume_pool.type.lower() == "powerflex" and "this
operation is unsupported on storage pool type PowerFlex" in str(e):
+ return
+ self.fail(e)
+
self.debug("select type from usage_event where offering_id = 6 and
volume_id = '%s';"
% volume_id)
diff --git a/test/integration/smoke/test_vm_autoscaling.py b/test/integration/smoke/test_vm_autoscaling.py
index 7ae61ce57da..782d2bce3ad 100644
--- a/test/integration/smoke/test_vm_autoscaling.py
+++ b/test/integration/smoke/test_vm_autoscaling.py
@@ -22,6 +22,7 @@ Tests of VM Autoscaling
import logging
import time
import datetime
+import math
from nose.plugins.attrib import attr
from marvin.cloudstackTestCase import cloudstackTestCase
@@ -53,7 +54,8 @@ from marvin.lib.base import (Account,
from marvin.lib.common import (get_domain,
get_zone,
- get_template)
+ get_template,
+ list_storage_pools)
from marvin.lib.utils import wait_until
MIN_MEMBER = 1
@@ -466,8 +468,10 @@ class TestVmAutoScaling(cloudstackTestCase):
def verifyVmProfile(self, vm, autoscalevmprofileid, networkid=None,
projectid=None):
self.message("Verifying profiles of new VM %s (%s)" % (vm.name, vm.id))
datadisksizeInBytes = None
+ datadiskpoolid = None
diskofferingid = None
rootdisksizeInBytes = None
+ rootdiskpoolid = None
sshkeypairs = None
affinitygroupIdsArray = []
@@ -496,10 +500,24 @@ class TestVmAutoScaling(cloudstackTestCase):
for volume in volumes:
if volume.type == 'ROOT':
rootdisksizeInBytes = volume.size
+ rootdiskpoolid = volume.storageid
elif volume.type == 'DATADISK':
datadisksizeInBytes = volume.size
+ datadiskpoolid = volume.storageid
diskofferingid = volume.diskofferingid
+ rootdisk_pool_response = list_storage_pools(
+ self.apiclient,
+ id=rootdiskpoolid
+ )
+ rootdisk_pool = rootdisk_pool_response[0]
+
+ datadisk_pool_response = list_storage_pools(
+ self.apiclient,
+ id=datadiskpoolid
+ )
+ datadisk_pool = datadisk_pool_response[0]
+
vmprofiles_list = AutoScaleVmProfile.list(
self.regular_user_apiclient,
listall=True,
@@ -522,18 +540,26 @@ class TestVmAutoScaling(cloudstackTestCase):
self.assertEquals(templateid, vmprofile.templateid)
self.assertEquals(serviceofferingid, vmprofile.serviceofferingid)
+ rootdisksize = None
if vmprofile_otherdeployparams.rootdisksize:
- self.assertEquals(int(rootdisksizeInBytes),
int(vmprofile_otherdeployparams.rootdisksize) * (1024 ** 3))
+ rootdisksize = int(vmprofile_otherdeployparams.rootdisksize)
elif vmprofile_otherdeployparams.overridediskofferingid:
self.assertEquals(vmprofile_otherdeployparams.overridediskofferingid,
self.disk_offering_override.id)
- self.assertEquals(int(rootdisksizeInBytes),
int(self.disk_offering_override.disksize) * (1024 ** 3))
+ rootdisksize = int(self.disk_offering_override.disksize)
else:
- self.assertEquals(int(rootdisksizeInBytes), int(self.templatesize)
* (1024 ** 3))
+ rootdisksize = int(self.templatesize)
+
+ if rootdisk_pool.type.lower() == "powerflex":
+ rootdisksize = (int(math.ceil(rootdisksize / 8) * 8))
+ self.assertEquals(int(rootdisksizeInBytes), rootdisksize * (1024 ** 3))
if vmprofile_otherdeployparams.diskofferingid:
self.assertEquals(diskofferingid,
vmprofile_otherdeployparams.diskofferingid)
if vmprofile_otherdeployparams.disksize:
- self.assertEquals(int(datadisksizeInBytes),
int(vmprofile_otherdeployparams.disksize) * (1024 ** 3))
+ datadisksize = int(vmprofile_otherdeployparams.disksize)
+ if datadisk_pool.type.lower() == "powerflex":
+ datadisksize = (int(math.ceil(datadisksize / 8) * 8))
+ self.assertEquals(int(datadisksizeInBytes), datadisksize * (1024
** 3))
if vmprofile_otherdeployparams.keypairs:
self.assertEquals(sshkeypairs,
vmprofile_otherdeployparams.keypairs)
diff --git a/test/integration/smoke/test_vm_life_cycle.py b/test/integration/smoke/test_vm_life_cycle.py
index c7c9a01bd32..8df0b994a55 100644
--- a/test/integration/smoke/test_vm_life_cycle.py
+++ b/test/integration/smoke/test_vm_life_cycle.py
@@ -1710,8 +1710,8 @@ class TestKVMLiveMigration(cloudstackTestCase):
def get_target_pool(self, volid):
target_pools = StoragePool.listForMigration(self.apiclient, id=volid)
- if len(target_pools) < 1:
- self.skipTest("Not enough storage pools found")
+ if target_pools is None or len(target_pools) == 0:
+ self.skipTest("Not enough storage pools found for migration")
return target_pools[0]
diff --git a/test/integration/smoke/test_vm_snapshot_kvm.py b/test/integration/smoke/test_vm_snapshot_kvm.py
index 5c133f6e762..9dd7c529de5 100644
--- a/test/integration/smoke/test_vm_snapshot_kvm.py
+++ b/test/integration/smoke/test_vm_snapshot_kvm.py
@@ -77,6 +77,18 @@ class TestVmSnapshot(cloudstackTestCase):
Configurations.update(cls.apiclient,
name = "kvm.vmstoragesnapshot.enabled",
value = "true")
+
+ cls.services["domainid"] = cls.domain.id
+ cls.services["small"]["zoneid"] = cls.zone.id
+ cls.services["zoneid"] = cls.zone.id
+
+ cls.account = Account.create(
+ cls.apiclient,
+ cls.services["account"],
+ domainid=cls.domain.id
+ )
+ cls._cleanup.append(cls.account)
+
#The version of CentOS has to be supported
templ = {
"name": "CentOS8",
@@ -91,36 +103,33 @@ class TestVmSnapshot(cloudstackTestCase):
"directdownload": True,
}
- template = Template.register(cls.apiclient, templ, zoneid=cls.zone.id,
hypervisor=cls.hypervisor)
+ template = Template.register(
+ cls.apiclient,
+ templ,
+ zoneid=cls.zone.id,
+ account=cls.account.name,
+ domainid=cls.account.domainid,
+ hypervisor=cls.hypervisor
+ )
if template == FAILED:
assert False, "get_template() failed to return template\
with description %s" % cls.services["ostype"]
- cls.services["domainid"] = cls.domain.id
- cls.services["small"]["zoneid"] = cls.zone.id
cls.services["templates"]["ostypeid"] = template.ostypeid
- cls.services["zoneid"] = cls.zone.id
- cls.account = Account.create(
- cls.apiclient,
- cls.services["account"],
- domainid=cls.domain.id
- )
- cls._cleanup.append(cls.account)
-
- service_offerings_nfs = {
+ service_offering_nfs = {
"name": "nfs",
- "displaytext": "nfs",
- "cpunumber": 1,
- "cpuspeed": 500,
- "memory": 512,
- "storagetype": "shared",
- "customizediops": False,
- }
+ "displaytext": "nfs",
+ "cpunumber": 1,
+ "cpuspeed": 500,
+ "memory": 512,
+ "storagetype": "shared",
+ "customizediops": False,
+ }
cls.service_offering = ServiceOffering.create(
cls.apiclient,
- service_offerings_nfs,
+ service_offering_nfs,
)
cls._cleanup.append(cls.service_offering)
@@ -138,7 +147,7 @@ class TestVmSnapshot(cloudstackTestCase):
rootdisksize=20,
)
cls.random_data_0 = random_gen(size=100)
- cls.test_dir = "/tmp"
+ cls.test_dir = "$HOME"
cls.random_data = "random.data"
return
@@ -201,8 +210,8 @@ class TestVmSnapshot(cloudstackTestCase):
self.apiclient,
self.virtual_machine.id,
MemorySnapshot,
- "TestSnapshot",
- "Display Text"
+ "TestVmSnapshot",
+ "Test VM Snapshot"
)
self.assertEqual(
vm_snapshot.state,
@@ -269,6 +278,8 @@ class TestVmSnapshot(cloudstackTestCase):
self.virtual_machine.start(self.apiclient)
+ time.sleep(30)
+
try:
ssh_client = self.virtual_machine.get_ssh_client(reconnect=True)
@@ -288,7 +299,7 @@ class TestVmSnapshot(cloudstackTestCase):
self.assertEqual(
self.random_data_0,
result[0],
- "Check the random data is equal with the ramdom file!"
+ "Check the random data is equal with the random file!"
)
@attr(tags=["advanced", "advancedns", "smoke"], required_hardware="true")
@@ -320,7 +331,7 @@ class TestVmSnapshot(cloudstackTestCase):
list_snapshot_response = VmSnapshot.list(
self.apiclient,
virtualmachineid=self.virtual_machine.id,
- listall=False)
+ listall=True)
self.debug('list_snapshot_response -------------------- %s' %
list_snapshot_response)
self.assertIsNone(list_snapshot_response, "snapshot is already
deleted")
diff --git a/test/integration/smoke/test_vm_snapshots.py b/test/integration/smoke/test_vm_snapshots.py
index 07779e78c58..8c106f05a9f 100644
--- a/test/integration/smoke/test_vm_snapshots.py
+++ b/test/integration/smoke/test_vm_snapshots.py
@@ -27,7 +27,9 @@ from marvin.lib.base import (Account,
from marvin.lib.common import (get_zone,
get_domain,
get_suitable_test_template,
+ list_volumes,
list_snapshots,
+ list_storage_pools,
list_virtual_machines)
import time
@@ -87,6 +89,18 @@ class TestVmSnapshot(cloudstackTestCase):
serviceofferingid=cls.service_offering.id,
mode=cls.zone.networktype
)
+ volumes = list_volumes(
+ cls.apiclient,
+ virtualmachineid=cls.virtual_machine.id,
+ type='ROOT',
+ listall=True
+ )
+ volume = volumes[0]
+ volume_pool_response = list_storage_pools(
+ cls.apiclient,
+ id=volume.storageid
+ )
+ cls.volume_pool = volume_pool_response[0]
cls.random_data_0 = random_gen(size=100)
cls.test_dir = "$HOME"
cls.random_data = "random.data"
@@ -146,15 +160,15 @@ class TestVmSnapshot(cloudstackTestCase):
#KVM VM Snapshot needs to set snapshot with memory
MemorySnapshot = False
- if self.hypervisor.lower() in (KVM.lower()):
+ if self.hypervisor.lower() in (KVM.lower()) and
self.volume_pool.type.lower() != "powerflex":
MemorySnapshot = True
vm_snapshot = VmSnapshot.create(
self.apiclient,
self.virtual_machine.id,
MemorySnapshot,
- "TestSnapshot",
- "Display Text"
+ "TestVmSnapshot",
+ "Test VM Snapshot"
)
self.assertEqual(
vm_snapshot.state,
@@ -214,7 +228,7 @@ class TestVmSnapshot(cloudstackTestCase):
)
#We don't need to stop the VM when taking a VM Snapshot on KVM
- if self.hypervisor.lower() in (KVM.lower()):
+ if self.hypervisor.lower() in (KVM.lower()) and
self.volume_pool.type.lower() != "powerflex":
pass
else:
self.virtual_machine.stop(self.apiclient)
@@ -224,7 +238,7 @@ class TestVmSnapshot(cloudstackTestCase):
list_snapshot_response[0].id)
#We don't need to start the VM when taking a VM Snapshot on KVM
- if self.hypervisor.lower() in (KVM.lower()):
+ if self.hypervisor.lower() in (KVM.lower()) and
self.volume_pool.type.lower() != "powerflex":
pass
else:
self.virtual_machine.start(self.apiclient)
diff --git a/test/integration/smoke/test_volumes.py b/test/integration/smoke/test_volumes.py
index 28a029adf70..6cf3f082bc2 100644
--- a/test/integration/smoke/test_volumes.py
+++ b/test/integration/smoke/test_volumes.py
@@ -19,6 +19,7 @@
import os
import tempfile
import time
+import math
import unittest
import urllib.error
import urllib.parse
@@ -42,6 +43,7 @@ from marvin.lib.common import (get_domain,
get_zone,
find_storage_pool_type,
get_pod,
+ list_storage_pools,
list_disk_offering)
from marvin.lib.utils import (cleanup_resources, checkVolumeSize)
from marvin.lib.utils import (format_volume_to_ext3,
@@ -235,7 +237,6 @@ class TestCreateVolume(cloudstackTestCase):
"Failed to start VM (ID: %s) " % vm.id)
timeout = timeout - 1
- vol_sz = str(list_volume_response[0].size)
ssh = self.virtual_machine.get_ssh_client(
reconnect=True
)
@@ -243,6 +244,7 @@ class TestCreateVolume(cloudstackTestCase):
list_volume_response = Volume.list(
self.apiClient,
id=volume.id)
+ vol_sz = str(list_volume_response[0].size)
if list_volume_response[0].hypervisor.lower() ==
XEN_SERVER.lower():
volume_name = "/dev/xvd" + chr(ord('a') +
int(list_volume_response[0].deviceid))
self.debug(" Using XenServer volume_name: %s" % (volume_name))
@@ -533,6 +535,17 @@ class TestVolumes(cloudstackTestCase):
# Sleep to ensure the current state will reflected in other calls
time.sleep(self.services["sleep"])
+ list_volume_response = Volume.list(
+ self.apiClient,
+ id=self.volume.id
+ )
+ volume = list_volume_response[0]
+
+ list_volume_pool_response = list_storage_pools(self.apiClient,
id=volume.storageid)
+ volume_pool = list_volume_pool_response[0]
+ if volume_pool.type.lower() == "powerflex":
+ self.skipTest("Extract volume operation is unsupported for volumes
on storage pool type %s" % volume_pool.type)
+
cmd = extractVolume.extractVolumeCmd()
cmd.id = self.volume.id
cmd.mode = "HTTP_DOWNLOAD"
@@ -658,7 +671,15 @@ class TestVolumes(cloudstackTestCase):
type='DATADISK'
)
for vol in list_volume_response:
- if vol.id == self.volume.id and int(vol.size) ==
(int(disk_offering_20_GB.disksize) * (1024 ** 3)) and vol.state == 'Ready':
+ list_volume_pool_response = list_storage_pools(
+ self.apiClient,
+ id=vol.storageid
+ )
+ volume_pool = list_volume_pool_response[0]
+ disksize = (int(disk_offering_20_GB.disksize))
+ if volume_pool.type.lower() == "powerflex":
+ disksize = (int(math.ceil(disksize / 8) * 8))
+ if vol.id == self.volume.id and int(vol.size) == disksize *
(1024 ** 3) and vol.state == 'Ready':
success = True
if success:
break
@@ -925,7 +946,15 @@ class TestVolumes(cloudstackTestCase):
type='DATADISK'
)
for vol in list_volume_response:
- if vol.id == self.volume.id and int(vol.size) == (20 * (1024
** 3)) and vol.state == 'Ready':
+ list_volume_pool_response = list_storage_pools(
+ self.apiClient,
+ id=vol.storageid
+ )
+ volume_pool = list_volume_pool_response[0]
+ disksize = 20
+ if volume_pool.type.lower() == "powerflex":
+ disksize = (int(math.ceil(disksize / 8) * 8))
+ if vol.id == self.volume.id and int(vol.size) == disksize *
(1024 ** 3) and vol.state == 'Ready':
success = True
if success:
break
@@ -1283,7 +1312,6 @@ class TestVolumeEncryption(cloudstackTestCase):
"Failed to start VM (ID: %s) " % vm.id)
timeout = timeout - 1
- vol_sz = str(list_volume_response[0].size)
ssh = virtual_machine.get_ssh_client(
reconnect=True
)
@@ -1292,6 +1320,7 @@ class TestVolumeEncryption(cloudstackTestCase):
list_volume_response = Volume.list(
self.apiclient,
id=volume.id)
+ vol_sz = str(list_volume_response[0].size)
volume_name = "/dev/vd" + chr(ord('a') +
int(list_volume_response[0].deviceid))
self.debug(" Using KVM volume_name: %s" % (volume_name))
@@ -1410,7 +1439,6 @@ class TestVolumeEncryption(cloudstackTestCase):
"Failed to start VM (ID: %s) " % vm.id)
timeout = timeout - 1
- vol_sz = str(list_volume_response[0].size)
ssh = virtual_machine.get_ssh_client(
reconnect=True
)
@@ -1419,6 +1447,12 @@ class TestVolumeEncryption(cloudstackTestCase):
list_volume_response = Volume.list(
self.apiclient,
id=volume.id)
+ vol_sz = str(list_volume_response[0].size)
+ list_volume_pool_response = list_storage_pools(self.apiclient,
id=list_volume_response[0].storageid)
+ volume_pool = list_volume_pool_response[0]
+ if volume_pool.type.lower() == "powerflex":
+ vol_sz = int(vol_sz)
+ vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
volume_name = "/dev/vd" + chr(ord('a') +
int(list_volume_response[0].deviceid))
self.debug(" Using KVM volume_name: %s" % (volume_name))
@@ -1543,7 +1577,6 @@ class TestVolumeEncryption(cloudstackTestCase):
"Failed to start VM (ID: %s) " % vm.id)
timeout = timeout - 1
- vol_sz = str(list_volume_response[0].size)
ssh = virtual_machine.get_ssh_client(
reconnect=True
)
@@ -1552,6 +1585,12 @@ class TestVolumeEncryption(cloudstackTestCase):
list_volume_response = Volume.list(
self.apiclient,
id=volume.id)
+ vol_sz = str(list_volume_response[0].size)
+ list_volume_pool_response = list_storage_pools(self.apiclient,
id=list_volume_response[0].storageid)
+ volume_pool = list_volume_pool_response[0]
+ if volume_pool.type.lower() == "powerflex":
+ vol_sz = int(vol_sz)
+ vol_sz = str(vol_sz - (128 << 20) - ((vol_sz >> 30) * 200704))
volume_name = "/dev/vd" + chr(ord('a') +
int(list_volume_response[0].deviceid))
self.debug(" Using KVM volume_name: %s" % (volume_name))
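The PowerFlex branch added to the encryption tests above shrinks the expected device size to account for overhead on the encrypted volume. Reading the expression, it subtracts a flat 128 MiB plus 200704 bytes (196 KiB) per GiB of volume size; a hedged restatement in Python (the overhead breakdown is an interpretation of the constants, not something documented in the patch):

    def expected_encrypted_device_size(vol_sz_bytes):
        # subtract a fixed 128 MiB plus 196 KiB (200704 bytes) per GiB,
        # matching the adjustment applied for PowerFlex-backed encrypted volumes
        return vol_sz_bytes - (128 << 20) - ((vol_sz_bytes >> 30) * 200704)

    # e.g. for an 8 GiB PowerFlex volume
    assert expected_encrypted_device_size(8 << 30) == 8454111232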
diff --git a/tools/marvin/marvin/lib/utils.py b/tools/marvin/marvin/lib/utils.py
index f80eccf1159..c822a587dfc 100644
--- a/tools/marvin/marvin/lib/utils.py
+++ b/tools/marvin/marvin/lib/utils.py
@@ -300,12 +300,63 @@ def get_hypervisor_version(apiclient):
assert hosts_list_validation_result[0] == PASS, "host list validation
failed"
return hosts_list_validation_result[1].hypervisorversion
+def is_snapshot_on_powerflex(apiclient, dbconn, config, zoneid, snapshotid):
+ """
+ Checks whether the snapshot with uuid `snapshotid` is present on
the PowerFlex primary storage
+
+ @param apiclient: api client connection
+ @param dbconn: connection to the cloudstack db
+ @param config: marvin configuration file
+ @param zoneid: uuid of the zone where the PowerFlex primary storage
pool is configured
+ @param snapshotid: uuid of the snapshot
+ @return: True if snapshot is found, False otherwise
+ """
+
+ qresultset = dbconn.execute(
+ "SELECT id FROM snapshots WHERE uuid = '%s';" \
+ % str(snapshotid)
+ )
+ if len(qresultset) == 0:
+ raise Exception(
+ "No snapshot found in cloudstack with id %s" % snapshotid)
+
+
+ snapshotid = qresultset[0][0]
+ qresultset = dbconn.execute(
+ "SELECT install_path, store_id FROM snapshot_store_ref WHERE
snapshot_id='%s' AND store_role='Primary';" % snapshotid
+ )
+
+ assert isinstance(qresultset, list), "Invalid db query response for
snapshot %s" % snapshotid
+
+ if len(qresultset) == 0:
+ #Snapshot does not exist
+ return False
+
+ from .base import StoragePool
+ #pass store_id to get the exact storage pool where snapshot is stored
+ primaryStores = StoragePool.list(apiclient, zoneid=zoneid,
id=int(qresultset[0][1]))
+
+ assert isinstance(primaryStores, list), "Not a valid response for
listStoragePools"
+ assert len(primaryStores) != 0, "No storage pools found in zone %s" %
zoneid
+
+ primaryStore = primaryStores[0]
+
+ if str(primaryStore.provider).lower() != "powerflex":
+ raise Exception(
+ "is_snapshot_on_powerflex works only against powerflex storage
pool. found %s" % str(primaryStore.provider))
+
+ snapshotPath = str(qresultset[0][0])
+ if not snapshotPath:
+ return False
+
+ return True
+
def is_snapshot_on_nfs(apiclient, dbconn, config, zoneid, snapshotid):
"""
Checks whether a snapshot with id (not UUID) `snapshotid` is present on
the nfs storage
@param apiclient: api client connection
- @param @dbconn: connection to the cloudstack db
+ @param dbconn: connection to the cloudstack db
@param config: marvin configuration file
@param zoneid: uuid of the zone on which the secondary nfs storage pool is
mounted
@param snapshotid: uuid of the snapshot