This test sets up an LVM volume over two images, then formats the logical
volume and finally checks the filesystem using fsck.

Signed-off-by: Yolkfull Chow

Remove the process of filling up the disk.
Add a 'clean' param which can skip the umount and volume-removal
commands, so this case can be used by the following benchmark or stress tests.
Add dbench to the lvm tests.

Signed-off-by: Jason Wang <[email protected]>

This test depends on fillup_disk test and ioquit test.
Signed-off-by: Qingtang Zhou <[email protected]>
---
 client/tests/kvm/tests_base.cfg.sample |   36 ++++++++++++++
 client/virt/tests/lvm.py               |   83 ++++++++++++++++++++++++++++++++
 2 files changed, 119 insertions(+), 0 deletions(-)
 create mode 100644 client/virt/tests/lvm.py

diff --git a/client/tests/kvm/tests_base.cfg.sample 
b/client/tests/kvm/tests_base.cfg.sample
index 0d72d29..af08398 100644
--- a/client/tests/kvm/tests_base.cfg.sample
+++ b/client/tests/kvm/tests_base.cfg.sample
@@ -875,6 +875,34 @@ variants:
         fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1 
oflag=direct"
         kill_vm = yes
 
+    - lvm:
+        only Linux
+        images += ' stg1 stg2'
+        image_name_stg1 = 'storage_4k'
+        image_name_stg2 = 'storage_64k'
+        image_format_stg1 = qcow2
+        image_format_stg2 = qcow2
+        guest_testdir = /mnt
+        disks = "/dev/sdb /dev/sdc"
+        kill_vm = no
+        post_command_noncritical = no
+        variants:
+            lvm_create:
+                type = lvm
+                clean = no
+                pre_command = 'qemu-img create -f qcow2 -o cluster_size=4096 
/tmp/kvm_autotest_root/images/storage_4k.qcow2 1G && qemu-img create -f qcow2 
-o cluster_size=65536 /tmp/kvm_autotest_root/images/storage_64k.qcow2 1G'
+            lvm_fill: lvm_create
+                type = fillup_disk
+                fillup_timeout = 120
+                fillup_size = 20
+                fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d bs=%dM count=1 
oflag=direct"
+            lvm_ioquit: lvm_create
+                type = ioquit
+                kill_vm = yes
+                background_cmd = "for i in 1 2 3 4; do (dd if=/dev/urandom 
of=/tmp/file bs=102400 count=10000000 &); done"
+                check_cmd = pgrep dd
+                check_img = yes
+
     - ioquit:
         only Linux
         type = ioquit
@@ -1652,6 +1680,8 @@ variants:
                             md5sum_1m_cd1 = 127081cbed825d7232331a2083975528
                         fillup_disk:
                             fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
+                        lvm.lvm_fill:
+                            fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
 
                     - 4.7.x86_64:
                         no setup autotest
@@ -1673,6 +1703,8 @@ variants:
                             md5sum_1m_cd1 = 58fa63eaee68e269f4cb1d2edf479792
                         fillup_disk:
                             fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
+                        lvm.lvm_fill:
+                            fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
 
                     - 4.8.i386:
                         no setup autotest
@@ -1692,6 +1724,8 @@ variants:
                             sys_path = "/sys/class/net/%s/driver"
                         fillup_disk:
                             fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
+                        lvm.lvm_fill:
+                            fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
 
 
                     - 4.8.x86_64:
@@ -1712,6 +1746,8 @@ variants:
                             sys_path = "/sys/class/net/%s/driver"
                         fillup_disk:
                             fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
+                        lvm.lvm_fill:
+                            fillup_cmd = "dd if=/dev/zero of=/%s/fillup.%d 
bs=%dM count=1"
 
 
                     - 5.3.i386:
diff --git a/client/virt/tests/lvm.py b/client/virt/tests/lvm.py
new file mode 100644
index 0000000..4a798ba
--- /dev/null
+++ b/client/virt/tests/lvm.py
@@ -0,0 +1,83 @@
+import logging
+from autotest_lib.client.common_lib import error
+
+
+def run_lvm(test, params, env):
+    """
+    KVM reboot test:
+    1) Log into a guest
+    2) Create a volume group and add both disks as pv to the Group
+    3) Create a logical volume on the VG
+    5) `fsck' to check the partition that LV locates
+
+    @param test: kvm test object
+    @param params: Dictionary with the test parameters
+    @param env: Dictionary with test environment.
+    """
+    vm = env.get_vm(params["main_vm"])
+    vm.verify_alive()
+    timeout = int(params.get("login_timeout", 360))
+    session = vm.wait_for_login(timeout=timeout)
+
+    vg_name = "volgrp_kvm_test"
+    lv_name = "lv_kvm_test"
+    lv_path = "/dev/%s/%s" % (vg_name, lv_name)
+    disks = params.get("disks", "/dev/hdb /dev/hdc")
+    clean = params.get("clean", "yes")
+    timeout = params.get("lvm_timeout", "600")
+
+    try:
+        s, o = session.cmd_status_output("pvcreate %s" % disks)
+        if s != 0:
+            raise error.TestFail("Create physical volume failed: %s" % o)
+        logging.info("Physical volumes created")
+
+        s, o = session.cmd_status_output("vgcreate %s %s" %
+                                                 (vg_name, disks))
+        if s != 0:
+            raise error.TestFail("Create volume group failed: %s" % o)
+        logging.info("Volume group %s created" % vg_name)
+
+        # Activate the volume group
+        s, o = session.cmd_status_output("vgchange -ay %s" % vg_name)
+        if s != 0:
+            raise error.TestFail("Activate volume group failed: %s" % o)
+
+        # Create a logical volume on this VG
+        s, o = session.cmd_status_output("lvcreate -L2000 -n %s %s" %
+                                                 (lv_name, vg_name))
+        if s != 0:
+            raise error.TestFail("Create volume failed: %s" % o)
+
+        # Create file system on the logical volume
+        s, o = session.cmd_status_output("yes|mkfs.ext3 %s" % lv_path,
+                                                 timeout = int(timeout))
+        if s != 0:
+            raise error.TestFail("Failed to create file system on lv: %s" % o)
+
+        s, o = session.cmd_status_output("mount %s /mnt" % lv_path)
+        if s != 0:
+            raise error.TestFail("Failed to mount the logical volume: %s" % o)
+
+        s, o = session.cmd_status_output("umount %s" % lv_path)
+        if s != 0 :
+            logging.error(o)
+            raise error.TestFail("Failed to umount the lv %s" % lv_path)
+
+        s, o = session.cmd_status_output("fsck %s" % lv_path,
+                                                 timeout = int(timeout))
+        if s != 0:
+            logging.error(o)
+            raise error.TestFail("Error occurred while fsck: %s" % o)
+
+        # need remount it for the following test
+        if clean == "no":
+            s, o = session.cmd_status_output("mount %s /mnt" % lv_path)
+            if s != 0:
+                raise error.TestFail("Fail to mount the logical volume: %s" % 
o)
+            logging.info(session.cmd_output("mount"))
+
+    finally:
+        if clean == "yes":
+            cmd = "umount /mnt;lvremove %s;vgchange -a n %s;vgremove -f %s"
+            session.cmd_status(cmd % (lv_name, vg_name, vg_name))
-- 
1.7.4.1

_______________________________________________
Autotest mailing list
[email protected]
http://test.kernel.org/cgi-bin/mailman/listinfo/autotest

Reply via email to