GitHub user gauravaradhye commented on a diff in the pull request:

    https://github.com/apache/cloudstack/pull/117#discussion_r26301476
  
    --- Diff: test/integration/testpaths/testpath_volume_snapshot.py ---
    @@ -0,0 +1,745 @@
    +# Licensed to the Apache Software Foundation (ASF) under one
    +# or more contributor license agreements.  See the NOTICE file
    +# distributed with this work for additional information
    +# regarding copyright ownership.  The ASF licenses this file
    +# to you under the Apache License, Version 2.0 (the
    +# "License"); you may not use this file except in compliance
    +# with the License.  You may obtain a copy of the License at
    +#
    +#   http://www.apache.org/licenses/LICENSE-2.0
    +#
    +# Unless required by applicable law or agreed to in writing,
    +# software distributed under the License is distributed on an
    +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    +# KIND, either express or implied.  See the License for the
    +# specific language governing permissions and limitations
    +# under the License.
    +""" Test cases for VM/Volume snapshot Test Path
    +"""
    +from nose.plugins.attrib import attr
    +from marvin.cloudstackTestCase import cloudstackTestCase, unittest
    +from marvin.lib.utils import (cleanup_resources,
    +                              random_gen,
    +                              format_volume_to_ext3,
    +                              is_snapshot_on_nfs,
    +                              validateList)
    +from marvin.lib.base import (Account,
    +                             ServiceOffering,
    +                             DiskOffering,
    +                             Template,
    +                             VirtualMachine,
    +                             Snapshot
    +                             )
    +from marvin.lib.common import (get_domain,
    +                               get_zone,
    +                               get_template,
    +                               list_volumes,
    +                               list_snapshots,
    +                               list_events,
    +                               )
    +
    +
    +import hashlib
    +from marvin.sshClient import SshClient
    +
    +from marvin.codes import PASS
    +
    +
    +def createChecksum(self, virtual_machine, disk, disk_type):
    +    """ Write data on the disk and return the md5 checksum"""
    +
    +    random_data_0 = random_gen(size=100)
    +    # Create an MD5 checksum of the random data
    +    m = hashlib.md5()
    +    m.update(random_data_0)
    +    checksum_random_data_0 = m.hexdigest()
    +    try:
    +        ssh_client = SshClient(
    +            virtual_machine.ssh_ip,
    +            virtual_machine.ssh_port,
    +            virtual_machine.username,
    +            virtual_machine.password
    +        )
    +    except Exception as e:
    +        self.fail("SSH failed for VM: %s" %
    +                  e)
    +
    +    self.debug("Formatting volume: %s to ext3" % disk.id)
    +    # Format partition using ext3
    +    # Note that this is the second data disk partition of the virtual
    +    # machine, as it already had a data disk attached before this new
    +    # volume was attached, hence datadiskdevice_2
    +
    +    format_volume_to_ext3(
    +        ssh_client,
    +        self.testdata["volume_write_path"][
    +            virtual_machine.hypervisor][disk_type]
    +    )
    +    cmds = ["fdisk -l",
    +            "mkdir -p %s" % self.testdata["data_write_paths"]["mount_dir"],
    +            "mount -t ext3 %s1 %s" % (
    +                self.testdata["volume_write_path"][
    +                    virtual_machine.hypervisor][disk_type],
    +                self.testdata["data_write_paths"]["mount_dir"]
    +            ),
    +            "mkdir -p %s/%s/%s " % (
    +                self.testdata["data_write_paths"]["mount_dir"],
    +                self.testdata["data_write_paths"]["sub_dir"],
    +                self.testdata["data_write_paths"]["sub_lvl_dir1"],
    +            ),
    +            "echo %s > %s/%s/%s/%s" % (
    +                random_data_0,
    +                self.testdata["data_write_paths"]["mount_dir"],
    +                self.testdata["data_write_paths"]["sub_dir"],
    +                self.testdata["data_write_paths"]["sub_lvl_dir1"],
    +                self.testdata["data_write_paths"]["random_data"]
    +            ),
    +            "cat %s/%s/%s/%s" % (
    +                self.testdata["data_write_paths"]["mount_dir"],
    +                self.testdata["data_write_paths"]["sub_dir"],
    +                self.testdata["data_write_paths"]["sub_lvl_dir1"],
    +                self.testdata["data_write_paths"]["random_data"]
    +            )
    +            ]
    +
    +    for c in cmds:
    +        self.debug("Command: %s" % c)
    +        result = ssh_client.execute(c)
    +        self.debug(result)
    +
    +    # Unmount the storage
    +    cmds = [
    +        "umount %s" % (self.testdata["data_write_paths"]["mount_dir"]),
    +    ]
    +
    +    for c in cmds:
    +        self.debug("Command: %s" % c)
    +        ssh_client.execute(c)
    +
    +    return checksum_random_data_0
    +
    +
    +def compareChecksum(
    +        self,
    +        original_checksum,
    +        disk_type,
    +        virt_machine=None,
    +        disk=None,
    +        new_vm=False):
    +    """
    +    Create md5 checksum of the data present on the disk and compare
    +    it with the given checksum
    +    """
    +
    +    if disk_type == "datadiskdevice_1" and new_vm:
    +        new_virtual_machine = VirtualMachine.create(
    +            self.userapiclient,
    +            self.testdata["small"],
    +            templateid=self.template.id,
    +            accountid=self.account.name,
    +            domainid=self.account.domainid,
    +            serviceofferingid=self.service_offering_cluster1.id,
    +            zoneid=self.zone.id,
    +            mode=self.zone.networktype
    +        )
    +
    +        new_virtual_machine.start(self.userapiclient)
    +
    +        self.debug("Attaching volume: %s to VM: %s" % (
    +            disk.id,
    +            new_virtual_machine.id
    +        ))
    +
    +        new_virtual_machine.attach_volume(
    +            self.apiclient,
    +            disk
    +        )
    +
    +        # Rebooting is required so that newly attached disks are detected
    +        self.debug("Rebooting : %s" % new_virtual_machine.id)
    +        new_virtual_machine.reboot(self.apiclient)
    +
    +    else:
    +        # If the disk is the root disk, there is no need to create a new VM;
    +        # just start the original machine that the root disk belongs to
    +        new_virtual_machine = virt_machine
    +        if new_virtual_machine.state != "Running":
    +            new_virtual_machine.start(self.userapiclient)
    +
    +    try:
    +        # Login to VM to verify test directories and files
    +
    +        self.debug(
    +            "SSH into VM (public IP: %s)" % new_virtual_machine.ssh_ip)
    +        ssh = SshClient(
    +            new_virtual_machine.ssh_ip,
    +            new_virtual_machine.ssh_port,
    +            new_virtual_machine.username,
    +            new_virtual_machine.password
    +        )
    +    except Exception as e:
    +        self.fail("SSH access failed for VM: %s, Exception: %s" %
    +                  (new_virtual_machine.ipaddress, e))
    +
    +    # Mount datadiskdevice_1 because this is the first data disk of the new
    +    # virtual machine
    +    cmds = ["blkid",
    +            "fdisk -l",
    +            "mkdir -p %s" % self.testdata["data_write_paths"]["mount_dir"],
    +            "mount -t ext3 %s1 %s" % (
    +                self.testdata["volume_write_path"][
    +                    new_virtual_machine.hypervisor][disk_type],
    +                self.testdata["data_write_paths"]["mount_dir"]
    +            ),
    +            ]
    +
    +    for c in cmds:
    +        self.debug("Command: %s" % c)
    +        result = ssh.execute(c)
    +        self.debug(result)
    +
    +    returned_data_0 = ssh.execute(
    +        "cat %s/%s/%s/%s" % (
    +            self.testdata["data_write_paths"]["mount_dir"],
    +            self.testdata["data_write_paths"]["sub_dir"],
    +            self.testdata["data_write_paths"]["sub_lvl_dir1"],
    +            self.testdata["data_write_paths"]["random_data"]
    +        ))
    +
    +    n = hashlib.md5()
    +    n.update(returned_data_0[0])
    +    checksum_returned_data_0 = n.hexdigest()
    +
    +    self.debug("returned_data_0: %s" % returned_data_0[0])
    +
    +    # Verify returned data
    +    self.assertEqual(
    +        original_checksum,
    +        checksum_returned_data_0,
    +        "Checksum does not match the checksum of the original data"
    +    )
    +
    +    # Unmount the data disk
    +    cmds = [
    +        "umount %s" % (self.testdata["data_write_paths"]["mount_dir"]),
    +    ]
    +
    +    for c in cmds:
    +        self.debug("Command: %s" % c)
    +        ssh.execute(c)
    +
    +    if new_vm:
    +        new_virtual_machine.detach_volume(
    +            self.apiclient,
    +            disk
    +        )
    +
    +        new_virtual_machine.delete(self.apiclient)
    +
    +    return
    +
    +
    +class TestVolumeSnapshot(cloudstackTestCase):
    +
    +    @classmethod
    +    def setUpClass(cls):
    +        testClient = super(TestVolumeSnapshot, cls).getClsTestClient()
    +        cls.apiclient = testClient.getApiClient()
    +        cls.testdata = testClient.getParsedTestDataConfig()
    +        cls.hypervisor = cls.testClient.getHypervisorInfo()
    +
    +        # Get Zone, Domain and templates
    +        cls.domain = get_domain(cls.apiclient)
    +        cls.zone = get_zone(cls.apiclient, testClient.getZoneForTests())
    +
    +        cls.template = get_template(
    +            cls.apiclient,
    +            cls.zone.id,
    +            cls.testdata["ostype"])
    +
    +        cls._cleanup = []
    +
    +        if cls.hypervisor.lower() not in [
    +                "vmware",
    +                "kvm",
    +                "xenserver"]:
    +            raise unittest.SkipTest(
    +                "Storage migration not supported on %s" %
    +                cls.hypervisor)
    +
    +        try:
    +
    +            # Create an account
    +            cls.account = Account.create(
    +                cls.apiclient,
    +                cls.testdata["account"],
    +                domainid=cls.domain.id
    +            )
    +            cls._cleanup.append(cls.account)
    +
    +            # Create user api client of the account
    +            cls.userapiclient = testClient.getUserApiClient(
    +                UserName=cls.account.name,
    +                DomainName=cls.account.domain
    +            )
    +
    +            # Create Service offering
    +
    +            cls.service_offering_cluster1 = ServiceOffering.create(
    +                cls.apiclient,
    +                cls.testdata["service_offering"],
    +            )
    +            cls._cleanup.append(cls.service_offering_cluster1)
    +
    +            # Create Disk offering
    +            cls.disk_offering_cluster1 = DiskOffering.create(
    +                cls.apiclient,
    +                cls.testdata["disk_offering"],
    +            )
    +            cls._cleanup.append(cls.disk_offering_cluster1)
    +
    +        except Exception as e:
    +            cls.tearDownClass()
    +            raise e
    +        return
    +
    +    @classmethod
    +    def tearDownClass(cls):
    +        try:
    +            cleanup_resources(cls.apiclient, cls._cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +
    +    def setUp(self):
    +        self.apiclient = self.testClient.getApiClient()
    +        self.dbclient = self.testClient.getDbConnection()
    +        self.cleanup = []
    +
    +    def tearDown(self):
    +        try:
    +            cleanup_resources(self.apiclient, self.cleanup)
    +        except Exception as e:
    +            raise Exception("Warning: Exception during cleanup : %s" % e)
    +        return
    +
    +    @attr(tags=["advanced", "basic"])
    +    def test_01_volume_snapshot(self):
    +        """ Test Volume (root) Snapshot
    +
    +        # 1. Deploy a VM on cluster wide primary storage.
    --- End diff --
    
    I don't think we need to create the VM on CWPS (cluster-wide primary
    storage). It can be created on any available storage. Please modify the
    comment accordingly.
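
    For reference, a minimal sketch (not part of the PR) of how the step and
    its deployment call could read once the VM is no longer pinned to
    cluster-wide primary storage; it reuses the testdata keys, template,
    account and service offering objects already defined in this file and
    assumes nothing else:

        # 1. Deploy a VM on any available primary storage.
        virtual_machine = VirtualMachine.create(
            self.userapiclient,
            self.testdata["small"],
            templateid=self.template.id,
            accountid=self.account.name,
            domainid=self.account.domainid,
            serviceofferingid=self.service_offering_cluster1.id,
            zoneid=self.zone.id,
            mode=self.zone.networktype
        )

    The docstring step would then simply drop the CWPS reference; the create
    call as written above does not pin the VM to a particular storage pool.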

