Tests the cpuset.cpus cgroup feature. It stresses all VM's CPUs
and changes the CPU affinity. Verifies correct behaviour.

* Add TestCpusetCpus test
* import cleanup
* private function names cleanup
---
 client/tests/cgroup/cgroup_common.py |    2 +
 client/tests/kvm/tests/cgroup.py     |  211 +++++++++++++++++++++++++++++++---
 2 files changed, 194 insertions(+), 19 deletions(-)

diff --git a/client/tests/cgroup/cgroup_common.py 
b/client/tests/cgroup/cgroup_common.py
index 186bf09..fe1601b 100755
--- a/client/tests/cgroup/cgroup_common.py
+++ b/client/tests/cgroup/cgroup_common.py
@@ -152,6 +152,8 @@ class Cgroup(object):
         """
         if pwd == None:
             pwd = self.root
+        if isinstance(pwd, int):
+            pwd = self.cgroups[pwd]
         try:
             # Remove tailing '\n' from each line
             ret = [_[:-1] for _ in open(pwd+prop, 'r').readlines()]
diff --git a/client/tests/kvm/tests/cgroup.py b/client/tests/kvm/tests/cgroup.py
index ee6ef2e..23ae622 100644
--- a/client/tests/kvm/tests/cgroup.py
+++ b/client/tests/kvm/tests/cgroup.py
@@ -7,8 +7,8 @@ import logging, os, re, sys, tempfile, time
 from random import random
 from autotest_lib.client.common_lib import error
 from autotest_lib.client.bin import utils
-from autotest_lib.client.tests.cgroup.cgroup_common import Cgroup, CgroupModules
-from autotest_lib.client.virt import virt_utils, virt_env_process
+from autotest_lib.client.tests.cgroup.cgroup_common import (Cgroup,
+                                            CgroupModules, get_load_per_cpu)
 from autotest_lib.client.virt.aexpect import ExpectTimeoutError
 from autotest_lib.client.virt.aexpect import ExpectProcessTerminatedError
 
@@ -839,7 +839,7 @@ def run_cgroup(test, params, env):
              * Freezes the guest and thaws it again couple of times
              * verifies that guest is frozen and runs when expected
             """
-            def get_stat(pid):
+            def _get_stat(pid):
                 """
                 Gather statistics of pid+1st level subprocesses cpu usage
                 @param pid: PID of the desired process
@@ -877,9 +877,9 @@ def run_cgroup(test, params, env):
                 _ = cgroup.get_property('freezer.state', cgroup.cgroups[0])
                 if 'FROZEN' not in _:
                     raise error.TestFail("Couldn't freeze the VM: state %s" % _)
-                stat_ = get_stat(pid)
+                stat_ = _get_stat(pid)
                 time.sleep(tsttime)
-                stat = get_stat(pid)
+                stat = _get_stat(pid)
                 if stat != stat_:
                     raise error.TestFail('Process was running in FROZEN state; '
                                          'stat=%s, stat_=%s, diff=%s' %
@@ -887,9 +887,9 @@ def run_cgroup(test, params, env):
                 logging.info("THAWING (%ss)", tsttime)
                 self.cgroup.set_property('freezer.state', 'THAWED',
                                          self.cgroup.cgroups[0])
-                stat_ = get_stat(pid)
+                stat_ = _get_stat(pid)
                 time.sleep(tsttime)
-                stat = get_stat(pid)
+                stat = _get_stat(pid)
                 if (stat - stat_) < (90*tsttime):
                     raise error.TestFail('Process was not active in FROZEN'
                                          'state; stat=%s, stat_=%s, diff=%s' %
@@ -1186,7 +1186,7 @@ def run_cgroup(test, params, env):
             Let each of 3 scenerios (described in test specification) stabilize
             and then measure the CPU utilisation for time_test time.
             """
-            def get_stat(f_stats, _stats=None):
+            def _get_stat(f_stats, _stats=None):
                 """ Reads CPU times from f_stats[] files and sumarize them. """
                 if _stats is None:
                     _stats = []
@@ -1218,27 +1218,27 @@ def run_cgroup(test, params, env):
             for thread_count in range(0, host_cpus):
                 sessions[thread_count].sendline(cmd)
             time.sleep(time_init)
-            _stats = get_stat(f_stats)
+            _stats = _get_stat(f_stats)
             time.sleep(time_test)
-            stats.append(get_stat(f_stats, _stats))
+            stats.append(_get_stat(f_stats, _stats))
 
             thread_count += 1
             sessions[thread_count].sendline(cmd)
             if host_cpus % no_speeds == 0 and no_speeds <= host_cpus:
                 time.sleep(time_init)
-                _stats = get_stat(f_stats)
+                _stats = _get_stat(f_stats)
                 time.sleep(time_test)
-                stats.append(get_stat(f_stats, _stats))
+                stats.append(_get_stat(f_stats, _stats))
 
             for i in range(thread_count+1, no_threads):
                 sessions[i].sendline(cmd)
             time.sleep(time_init)
-            _stats = get_stat(f_stats)
+            _stats = _get_stat(f_stats)
             for j in range(3):
-                __stats = get_stat(f_stats)
+                __stats = _get_stat(f_stats)
                 time.sleep(time_test)
-                stats.append(get_stat(f_stats, __stats))
-            stats.append(get_stat(f_stats, _stats))
+                stats.append(_get_stat(f_stats, __stats))
+            stats.append(_get_stat(f_stats, _stats))
 
             # Verify results
             err = ""
@@ -1292,7 +1292,7 @@ def run_cgroup(test, params, env):
         """
         1:10 variant of _TestCpuShare test.
         """
-        def __init__(self, vms, module):
+        def __init__(self, vms, modules):
             """
             Initialization
             @param vms: list of vms
@@ -1306,7 +1306,7 @@ def run_cgroup(test, params, env):
         """
         1:1 variant of _TestCpuShare test.
         """
-        def __init__(self, vms, module):
+        def __init__(self, vms, modules):
             """
             Initialization
             @param vms: list of vms
@@ -1316,6 +1316,177 @@ def run_cgroup(test, params, env):
             self.speeds = [100000, 100000]
 
 
+    class TestCpusetCpus:
+        """
+        Tests the cpuset.cpus cgroup feature. It stresses all VM's CPUs
+        and changes the CPU affinity. Verifies correct behaviour.
+        """
+        def __init__(self, vms, modules):
+            """
+            Initialization
+            @param vms: list of vms
+            @param modules: initialized cgroup module class
+            """
+            self.vm = vms[0]      # Virt machines
+            self.modules = modules          # cgroup module handler
+            self.cgroup = Cgroup('cpuset', '')   # cgroup handler
+            self.sessions = []
+
+
+        def cleanup(self):
+            """ Cleanup """
+            err = ""
+            try:
+                del(self.cgroup)
+            except Exception, failure_detail:
+                err += "\nCan't remove Cgroup: %s" % failure_detail
+
+            self.sessions[0].sendline('rm -f /tmp/cgroup-cpu-lock')
+            for i in range(len(self.sessions)):
+                try:
+                    self.sessions[i].close()
+                except Exception, failure_detail:
+                    err += ("\nCan't close the %dst ssh connection" % i)
+
+            if err:
+                logging.error("Some cleanup operations failed: %s", err)
+                raise error.TestError("Some cleanup operations failed: %s" %
+                                      err)
+
+
+        def init(self):
+            """
+            Prepares cgroup, moves VM into it and execute stressers.
+            """
+            self.cgroup.initialize(self.modules)
+            # We need only machine with more than 1 CPU
+            all_cpus = self.cgroup.get_property("cpuset.cpus")[0]
+            all_mems = self.cgroup.get_property("cpuset.mems")[0]
+            vm_cpus = int(params.get('smp', 1)) # cpus per VM
+            if all_cpus == "0" or vm_cpus < 2:
+                raise error.TestFail("This test needs at least 2 CPUs on "
+                                     "host and the first guest")
+            try:
+                no_cpus = int(all_cpus.split('-')[1]) + 1
+            except Exception:
+                raise error.TestFail("Failed to get #CPU from root cgroup.")
+            self.cgroup.mk_cgroup()
+            self.cgroup.set_property('cpuset.cpus', all_cpus, 0)
+            self.cgroup.set_property('cpuset.mems', all_mems, 0)
+            assign_vm_into_cgroup(self.vm, self.cgroup, 0)
+
+            cmd = "renice -n 10 $$; " # new ssh login should pass
+            cmd += "while [ -e /tmp/cgroup-cpu-lock ]; do :; done"
+            for i in range(vm_cpus):
+                self.sessions.append(self.vm.wait_for_login(timeout=30))
+                self.sessions[i].cmd("touch /tmp/cgroup-cpu-lock")
+                self.sessions[i].sendline(cmd)
+
+
+        def run(self):
+            """
+            Actual test; stress VM and verifies the impact on host.
+            """
+            def _test_it(tst_time):
+                """ Helper; gets stat differences during test_time period. """
+                _load = get_load_per_cpu()
+                time.sleep(tst_time)
+                return (get_load_per_cpu(_load)[1:])
+
+            tst_time = 1    # 1s
+            vm_cpus = int(params.get('smp', 1)) # cpus per VM
+            all_cpus = self.cgroup.get_property("cpuset.cpus")[0]
+            no_cpus = int(all_cpus.split('-')[1]) + 1
+            stats = []
+
+            # Comments are for vm_cpus=2, no_cpus=4, _SC_CLK_TCK=100
+            # All CPUs are used, utilisation should be maximal
+            # CPUs: oooo, Stat: 200
+            cpus = False
+            stats.append((_test_it(tst_time), cpus))
+
+            if no_cpus > vm_cpus:
+                # CPUs: OO__, Stat: 200
+                cpus = '0-%d' % (vm_cpus-1)
+                self.cgroup.set_property('cpuset.cpus', cpus, 0)
+                stats.append((_test_it(tst_time), cpus))
+
+            if no_cpus > vm_cpus:
+                # CPUs: __OO, Stat: 200
+                cpus = "%d-%d" % (no_cpus-vm_cpus, no_cpus-1)
+                self.cgroup.set_property('cpuset.cpus', cpus, 0)
+                stats.append((_test_it(tst_time), cpus))
+
+            # CPUs: O___, Stat: 100
+            cpus = "0"
+            self.cgroup.set_property('cpuset.cpus', cpus, 0)
+            stats.append((_test_it(tst_time), cpus))
+
+            # CPUs: _OO_, Stat: 200
+            if no_cpus == 2:
+                cpus = "1"
+            else:
+                cpus = "1-%d" % min(no_cpus-1, vm_cpus)
+            self.cgroup.set_property('cpuset.cpus', cpus, 0)
+            stats.append((_test_it(tst_time), cpus))
+
+            # CPUs: O_O_, Stat: 200
+            cpus = "0"
+            for i in range(2, min(vm_cpus*2, no_cpus), 2):
+                cpus += ",%d" % i
+            self.cgroup.set_property('cpuset.cpus', cpus, 0)
+            stats.append((_test_it(tst_time), cpus))
+
+            # CPUs: oooo, Stat: 200
+            cpus = False
+            self.cgroup.set_property('cpuset.cpus', all_cpus, 0)
+            stats.append((_test_it(tst_time), cpus))
+
+            err = ""
+
+            max_cpu_stat = os.sysconf(os.sysconf_names['SC_CLK_TCK'])
+            max_stat = max_cpu_stat * vm_cpus
+            for i in range(len(stats)):
+                if stats[i][1] is False:
+                    dist = distance(sum(stats[i][0]), max_stat)
+                    if dist > 0.25:
+                        err += "%d, " % i
+                        logging.error("%s part; incorrect utilisation, "
+                                      "dist=%s, %s", i, dist, stats[i])
+                    continue    # No restrictions, don't check per_cpu_load
+
+                if stats[i][1].count('-') == 1:
+                    # cpus defined by range
+                    cpus = []
+                    _ = stats[i][1].split('-')
+                    for cpu in range(int(_[0]), int(_[1]) + 1):
+                        cpus.append(cpu)
+                else:
+                    # cpus defined by ',' separated list
+                    cpus = [int(_) for _ in stats[i][1].split(',')]
+
+                for cpu in range(no_cpus):
+                    dist = distance(stats[i][0][cpu], max_cpu_stat)
+                    if cpu in cpus:
+                        if dist > 0.2:
+                            err += "%d, " % i
+                            logging.error("%s part; per_cpu_load failed; dist="
+                                          "%s, cpu=%s, stat=%s", i, dist, cpu,
+                                          stats[i])
+                    else:
+                        if dist < 0.75: # this CPU serves other processes
+                            err += "%d, " % i
+                            logging.error("%s part; per_cpu_load failed; 1-dist"
+                                          "=%s, cpu=%s, stat=%s", i, 1-dist,
+                                          cpu, stats[i])
+
+            if err:
+                err = "[%s] parts worked incorrectly, check the log" % err[:-2]
+                logging.error(err)
+                raise error.TestFail(err)
+
+            return ("All clear")
+
     # Setup
     # TODO: Add all new tests here
     tests = {"blkio_bandwidth_weigth_read"  : TestBlkioBandwidthWeigthRead,
@@ -1330,9 +1501,11 @@ def run_cgroup(test, params, env):
              "memory_limit"                 : TestMemoryLimit,
              "cpu_share_10"                 : TestCpuShare10,
              "cpu_share_50"                 : TestCpuShare50,
+             "cpuset_cpus"                  : TestCpusetCpus,
             }
     modules = CgroupModules()
-    if (modules.init(['blkio', 'cpu', 'devices', 'freezer', 'memory']) <= 0):
+    if (modules.init(['blkio', 'cpu', 'cpuset', 'devices', 'freezer',
+                      'memory']) <= 0):
         raise error.TestFail('Can\'t mount any cgroup modules')
     # Add all vms
     vms = []
-- 
1.7.7.3

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to majord...@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to