On Thu 20 Dec 2012 11:23:39 AM CST, Amos Kong wrote:
Enable demo mode of netperf, process the mixed demo results to drop
unstable factors, and only compute the effective demo results when
all the threads are working. This makes the performance results more
stable.
We start many clients and watch the demo output; once all the threads
are really working, we start to record the interrupt/packet numbers and
count the CPU usage. After a fixed test time, we record the final
interrupt/packet numbers.
Signed-off-by: Amos Kong <[email protected]>
---
shared/cfg/subtests.cfg.sample | 2 +-
tests/netperf.py | 95 ++++++++++++++++++++++++++++-----------
2 files changed, 69 insertions(+), 28 deletions(-)
diff --git a/shared/cfg/subtests.cfg.sample b/shared/cfg/subtests.cfg.sample
index df30cdb..9b5bedd 100644
--- a/shared/cfg/subtests.cfg.sample
+++ b/shared/cfg/subtests.cfg.sample
@@ -2480,7 +2480,7 @@ variants:
# bridge_nic2 = switch
nic_model_nic2 = e1000
netperf_files = netperf-2.6.0.tar.bz2
- setup_cmd = "cd /tmp && rm -rf netperf-2.6.0 && tar xvfj netperf-2.6.0.tar.bz2 && cd netperf-2.6.0 && ./configure --enable-burst && make"
+ setup_cmd = "cd /tmp && rm -rf netperf-2.6.0 && tar xvfj netperf-2.6.0.tar.bz2 && cd netperf-2.6.0 && ./configure --enable-burst --enable-demo=yes && make"
# configure netperf test parameters
l = 60
protocols = "TCP_STREAM TCP_MAERTS TCP_RR"
diff --git a/tests/netperf.py b/tests/netperf.py
index cde8a39..68090fc 100644
--- a/tests/netperf.py
+++ b/tests/netperf.py
@@ -1,4 +1,4 @@
-import logging, os, commands, threading, re, glob, time
+import logging, os, commands, threading, re, glob, time, shutil
from autotest.client import utils
from autotest.client.shared import error
from virttest import utils_test, utils_misc, remote
@@ -247,18 +247,6 @@ def start_test(server, server_ctl, host, clients, resultsdir, l=60,
@param server_cyg: shell session for cygwin in windows guest
"""
- def parse_file(file_prefix, raw=""):
- """ Parse result files and reture throughput total """
- thu = 0
- for filename in glob.glob("%s.*.nf" % file_prefix):
- o = commands.getoutput("cat %s |tail -n 1" % filename)
- try:
- thu += float(o.split()[raw])
- except Exception:
- logging.debug(commands.getoutput("cat %s.*" % file_prefix))
- return -1
- return thu
-
guest_ver_cmd = params.get("guest_ver_cmd", "uname -r")
fd = open("%s/netperf-result.%s.RHS" % (resultsdir, time.time()), "w")
@@ -307,19 +295,18 @@ def start_test(server, server_ctl, host, clients, resultsdir, l=60,
for j in sessions_test:
if (protocol == "TCP_RR"):
ret = launch_client(j, server, server_ctl, host, clients, l,
- "-t %s -v 0 -P -0 -- -r %s,%s" % (protocol, i, i),
+ "-t %s -v 1 -- -r %s,%s" % (protocol, i, i),
netserver_port, params, server_cyg)
- thu = parse_file("/tmp/netperf.%s" % ret['pid'], 0)
elif (protocol == "TCP_MAERTS"):
ret = launch_client(j, server, server_ctl, host, clients, l,
"-C -c -t %s -- -m ,%s" % (protocol, i),
netserver_port, params, server_cyg)
- thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4)
else:
ret = launch_client(j, server, server_ctl, host, clients, l,
"-C -c -t %s -- -m %s" % (protocol, i),
netserver_port, params, server_cyg)
- thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4)
+
+ thu = float(ret['thu'])
cpu = 100 - float(ret['mpstat'].split()[mpstat_index])
normal = thu / cpu
if ret.get('rx_pkts') and ret.get('irq_inj'):
@@ -353,7 +340,7 @@ def start_test(server, server_ctl, host, clients, resultsdir, l=60,
fd.flush()
logging.debug("Remove temporary files")
- commands.getoutput("rm -f /tmp/netperf.%s.*.nf" % ret['pid'])
+ commands.getoutput("rm -f /tmp/netperf.%s.nf" % ret['pid'])
fd.close()
@@ -472,19 +459,51 @@ def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
def netperf_thread(i, numa_enable, client_s):
cmd = ""
+ fname = "/tmp/netperf.%s.nf" % pid
if numa_enable:
output = ssh_cmd(client_s, "numactl --hardware")
n = int(re.findall("available: (\d+) nodes", output)[0]) - 1
cmd += "numactl --cpunodebind=%s --membind=%s " % (n, n)
- cmd += "%s -H %s -l %s %s" % (client_path, server, l, nf_args)
+ cmd += "%s -D 1 -H %s -l %s %s" % (client_path, server, int(l)*1.5, nf_args)
+ cmd += ">> %s" % fname
+
+ ssh_cmd(client_s, cmd)
+ output = ssh_cmd(client_s, "cat %s" % fname)
+ if not os.path.exists(fname):
+ f = file(fname, "w")
+ f.write(output)
+ f.close()
+
+
+ def parse_demo_result(fname, sessions):
+ """
+ Process the demo result, remove the noise from head,
+ and compute the final throughput.
+
+ @param fname: result file name
+ @param sessions: sessions' number
+ """
+ fd = open(fname)
+ lines = fd.readlines()
+ fd.close()
+
+ for i in range(1, len(lines)+1):
+ if "AF_INET" in lines[-i]:
+ break
+ nresult = i - 1
+ if nresult < int(sessions):
+ raise error.TestError("We couldn't reach the expected parallelism: "
+ "expected %s, got %s" % (sessions, nresult))
+
+ niteration = nresult / sessions
+ result = 0.0
+ for this in lines[-sessions * niteration:]:
+ result += float(re.findall("Interim result: *(\S+)", this)[0])
It seems that the result is not what you expect; I get this error:
Traceback (most recent call last):
File "/usr/local/autotest-devel/client/shared/test.py", line 852, in
_call_test_function
return func(*args, **dargs)
File "/usr/local/autotest-devel/client/shared/test.py", line 304, in
execute
postprocess_profiled_run, args, dargs)
File "/usr/local/autotest-devel/client/shared/test.py", line 221, in
_call_run_once
*args, **dargs)
File "/usr/local/autotest-devel/client/shared/test.py", line 327, in
run_once_profiling
self.run_once(*args, **dargs)
File "/usr/local/autotest-devel/client/virt/virt_test.py", line 101,
in run_once
run_func(self, params, env)
File "/usr/local/autotest-devel/client/shared/error.py", line 138, in
new_fn
return fn(*args, **kwargs)
File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
215, in run_netperf
params=params, server_cyg=server_cyg, test=test)
File "/usr/local/autotest-devel/client/shared/error.py", line 138, in
new_fn
return fn(*args, **kwargs)
File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
314, in start_test
netserver_port, params, server_cyg)
File "/usr/local/autotest-devel/client/shared/error.py", line 138, in
new_fn
return fn(*args, **kwargs)
File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
557, in launch_client
ret['thu'] = parse_demo_result(fname, int(sessions))
File "/usr/local/autotest-devel/client/virt/tests/netperf.py", line
500, in parse_demo_result
print float(re.findall("Interim result: *(\S+)", this)[0])
IndexError: list index out of range
+ result = result / niteration
+ logging.debug("niteration: %s" % niteration)
+ return result
- output = ssh_cmd(client_s, cmd)
- f = file("/tmp/netperf.%s.%s.nf" % (pid, i), "w")
- f.write(output)
- f.close()
- if get_status_flag:
- start_state = get_state()
pid = str(os.getpid())
threads = []
numa_enable = params.get("netperf_with_numa", "yes") == "yes"
@@ -496,10 +515,25 @@ def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
t.start()
ret = {}
ret['pid'] = pid
+ fname = "/tmp/netperf.%s.nf" % pid
+ while True:
+ try:
+ fd = open(fname)
+ content = "".join(fd.readlines())
+ fd.close()
+ except IOError:
+ content = ""
+ if int(sessions) == len(re.findall("MIGRATE", content)):
+ logging.debug("All netperf clients start to work.")
+ break
+
+ # real & effective test starts
+ if get_status_flag:
+ start_state = get_state()
ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1))
- for t in threads:
- t.join()
+ shutil.copy(fname, "/tmp/finished_result")
+ # real & effective test ends
if get_status_flag:
end_state = get_state()
if len(start_state) != len(end_state):
@@ -511,4 +545,11 @@ def launch_client(sessions, server, server_ctl, host, clients, l, nf_args,
for i in range(len(end_state) / 2):
ret[end_state[i * 2]] = (end_state[i * 2 + 1]
- start_state[i * 2 + 1])
+ # wait for all the threads to stop
+ for t in threads:
+ t.join()
+
+ # recover result file to remove the noise from end
+ shutil.copy("/tmp/finished_result", fname)
+ ret['thu'] = parse_demo_result(fname, int(sessions))
return ret
_______________________________________________
Autotest-kernel mailing list
[email protected]
https://www.redhat.com/mailman/listinfo/autotest-kernel