* [RFC PATCH 0/4] Network performance regression
From: Amos Kong @ 2011-12-23 10:28 UTC
To: lmr, wquan, kvm, jasowang, rhod, autotest

This patchset adds a new network performance test case for Windows,
refactors the old netperf test, and adds NUMA resource control.
The raw results are processed into a standard format at the end of
each test, so we can compute averages and compare them with earlier
results.

Feedback is welcome, thanks in advance!

---

Amos Kong (4):
      virt-test: add NTttcp subtests
      virt-test: Refactor netperf test and add analysis module
      netperf: pin guest vcpus/memory/vhost thread to numa node
      virt: Introduce regression testing infrastructure


 client/tests/kvm/control             |    7 +
 client/tests/kvm/perf.conf           |   23 +++
 client/tests/kvm/subtests.cfg.sample |   57 +++++-
 client/virt/tests/analyzer.py        |  224 ++++++++++++++++++++++
 client/virt/tests/netperf.py         |  312 ++++++++++++++++++++++++----------
 client/virt/tests/ntttcp.py          |  160 +++++++++++++++++
 client/virt/tests/regression.py      |   33 ++++
 7 files changed, 718 insertions(+), 98 deletions(-)
 create mode 100644 client/tests/kvm/perf.conf
 create mode 100644 client/virt/tests/analyzer.py
 create mode 100644 client/virt/tests/ntttcp.py
 create mode 100644 client/virt/tests/regression.py

-- 
Amos Kong
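The comparison step that closes the loop here is simple: each metric in
the standard-format results is averaged across repeated runs, and the
percentage change between the baseline job and the new job is reported
(as in the -4.8 / +1.0 rows shown by the regression.py example in patch
4/4). A minimal, purely illustrative sketch of that computation in the
same Python 2 style as the patches (the helper names below are not part
of the series):

    # Illustrative sketch: average repeated samples and report the
    # percentage change between a baseline job and a new job.
    def average(samples):
        return sum(samples) / float(len(samples))

    def percent_change(old, new):
        # e.g. old=109.548, new=104.239 -> roughly -4.8 (%)
        return (new - old) / old * 100

    # With one run per job this reproduces the -4.8 row from the
    # regression.py example; with repeated runs, each job's samples
    # would be averaged first.
    print "%+.1f%%" % percent_change(average([109.548]), average([104.239]))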
* [RFC PATCH 1/4] virt-test: add NTttcp subtests 2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong @ 2011-12-23 10:28 ` Amos Kong 2011-12-23 10:28 ` [RFC PATCH 2/4] virt-test: Refactor netperf test and add analysis module Amos Kong ` (4 subsequent siblings) 5 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2011-12-23 10:28 UTC (permalink / raw) To: lmr, wquan, kvm, jasowang, rhod, autotest This case will test tcp throughput between 2 windows guests, or between 1 guest and 1 external Windows host. When test between guest and external Windows host, 'receiver_address' should be set to external Windows' ip address. Need extract NTttcp.tar.gz[1] to the root dir of winutils.iso [1] http://amos-kong.rhcloud.com/pub/NTttcp.tar.gz NTttcp/ NTttcp/NT Testing TCP Tool.msi NTttcp/ntttcp.au3 This test will generate result files with standard format, raw_output_1.RHS: buf(k)| throughput(Mbit/s) 2| 109.548 4| 209.519 8| 399.576 We can compare it by a common method. Signed-off-by: Qingtang Zhou <qzhou@redhat.com> Signed-off-by: Amos Kong <akong@redhat.com> --- client/tests/kvm/subtests.cfg.sample | 21 ++++ client/virt/tests/ntttcp.py | 160 ++++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+), 0 deletions(-) create mode 100644 client/virt/tests/ntttcp.py diff --git a/client/tests/kvm/subtests.cfg.sample b/client/tests/kvm/subtests.cfg.sample index 3d47fb4..a05aee8 100644 --- a/client/tests/kvm/subtests.cfg.sample +++ b/client/tests/kvm/subtests.cfg.sample @@ -966,6 +966,27 @@ variants: netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -r %s protocols = "TCP_RR TCP_CRR UDP_RR" + - ntttcp: + type = ntttcp + image_snapshot = yes + check_ntttcp_cmd = "cmd /c dir C:\NTttcp" + ntttcp_sender_cmd = "cmd /c C:\NTttcp\NTttcps.exe -m %s,0,%s -a 2 -l %s" + ntttcp_receiver_cmd = "cmd /c C:\NTttcp\NTttcpr.exe -m %s,0,%s -a 6 -rb 256k" + session_num = 1 + buffers = "2k 4k 8k 16k 32k 64k 128k 256k" + timeout = 300 + kill_vm = yes + variants: + - guest_guest: + vms += " vm2" + - guest_host: + # external Windows system IP, NTttcp need to be installed firstly. + receiver_address = "192.168.1.1" + 32: + ntttcp_install_cmd = 'cmd /c "D:\autoit3.exe D:\NTttcp\NTttcp.au3 && mkdir C:\NTttcp && copy "C:\Program Files\Microsoft Corporation\NT Testing TCP Tool\*" C:\NTttcp && cd C:\NTttcp\ && copy NTttcp_%s.exe NTttcps.exe && copy NTttcp_%s.exe NTttcpr.exe"' + 64: + ntttcp_install_cmd = 'cmd /c "D:\autoit3.exe D:\NTttcp\NTttcp.au3 && mkdir C:\NTttcp && copy "C:\Program Files (x86)\Microsoft Corporation\NT Testing TCP Tool\*" C:\NTttcp && cd C:\NTttcp\ && copy NTttcp_%s.exe NTttcps.exe && copy NTttcp_%s.exe NTttcpr.exe"' + - ethtool: install setup image_copy unattended_install.cdrom only Linux type = ethtool diff --git a/client/virt/tests/ntttcp.py b/client/virt/tests/ntttcp.py new file mode 100644 index 0000000..4a1f7b0 --- /dev/null +++ b/client/virt/tests/ntttcp.py @@ -0,0 +1,160 @@ +import logging, os, glob, re +from autotest_lib.client.common_lib import error +from autotest_lib.client.virt import virt_utils + +_receiver_ready = False + +def run_ntttcp(test, params, env): + """ + Run NTttcp on Windows guest + + 1) Install NTttcp in server/client side by Autoit + 2) Start NTttcp in server/client side + 3) Get test results + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. 
+ """ + login_timeout = int(params.get("login_timeout", 360)) + timeout = int(params.get("timeout")) + results_path = os.path.join(test.resultsdir, + 'raw_output_%s' % test.iteration) + if params.get("platform") == "64": + platform = "x64" + else: + platform = "x86" + buffers = params.get("buffers").split() + session_num = params.get("session_num") + + vm_sender = env.get_vm(params["main_vm"]) + vm_sender.verify_alive() + vm_receiver = None + receiver_addr = params.get("receiver_address") + if not receiver_addr: + vm_receiver = env.get_vm("vm2") + vm_receiver.verify_alive() + try: + sess = None + sess = vm_receiver.wait_for_login(timeout=login_timeout) + receiver_addr = vm_receiver.get_address() + if not receiver_addr: + raise error.TestError("Can't get receiver(%s) ip address" % + vm_sender.name) + finally: + if sess: + sess.close() + + def install_ntttcp(session): + """ Install ntttcp through a remote session """ + logging.info("Installing NTttcp ...") + if session.cmd_status(params.get("check_ntttcp_cmd")) == 0: + # Don't install ntttcp if it's already installed + logging.info("NTttcp directory already exists") + return + ntttcp_install_cmd = params.get("ntttcp_install_cmd") + ret, output = session.cmd_status_output(ntttcp_install_cmd % + (platform, platform), timeout=200) + if ret != 0: + logging.error(output) + raise error.TestError("Can't install NTttcp on guest") + + def receiver(): + """ Receive side """ + logging.info("Starting receiver process on %s", receiver_addr) + if vm_receiver: + session = vm_receiver.wait_for_login(timeout=login_timeout) + else: + username = params.get("username", "") + password = params.get("password", "") + prompt = params.get("shell_prompt", "[\#\$]") + linesep = eval("'%s'" % params.get("shell_linesep", r"\n")) + client = params.get("shell_client") + port = int(params.get("shell_port")) + log_filename = ("session-%s-%s.log" % (receiver_addr, + virt_utils.generate_random_string(4))) + session = virt_utils.remote_login(client, receiver_addr, port, + username, password, prompt, + linesep, log_filename, timeout) + install_ntttcp(session) + ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd") + global _receiver_ready + f = open(results_path + ".receiver", 'a') + for b in buffers: + _receiver_ready = True + cmd = ntttcp_receiver_cmd % (session_num, receiver_addr) + r = session.cmd_output(cmd, timeout=timeout, + print_func=logging.debug) + _receiver_ready = False + f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + f.close() + session.close() + + def _wait(): + """ Check if receiver is ready """ + global _receiver_ready + if _receiver_ready: + return _receiver_ready + return None + + def sender(): + """ Send side """ + logging.info("Sarting sender process ...") + session = vm_sender.wait_for_login(timeout=login_timeout) + install_ntttcp(session) + ntttcp_sender_cmd = params.get("ntttcp_sender_cmd") + f = open(results_path + ".sender", 'a') + try: + for b in buffers: + cmd = ntttcp_sender_cmd % (session_num, receiver_addr, b) + # Wait until receiver ready + virt_utils.wait_for(_wait, timeout) + r = session.cmd_output(cmd, timeout=timeout, + print_func=logging.debug) + f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + finally: + f.close() + session.close() + + def parse_file(resultfile): + """ Parse raw result files and generate files with standard format """ + file = open(resultfile, "r") + list= [] + found = False + for line in file.readlines(): + o = re.findall("Send buffer size: (\d+)", line) + if o: + buffer = o[0] + if "Total 
Throughput(Mbit/s)" in line: + found = True + if found: + fields = line.split() + if len(fields) == 0: + continue + try: + [float(i) for i in fields] + list.append([buffer, fields[-1]]) + except ValueError: + continue + found = False + return list + + try: + bg = virt_utils.Thread(receiver, ()) + bg.start() + if bg.is_alive(): + sender() + bg.join(suppress_exception=True) + else: + raise error.TestError("Can't start backgroud receiver thread") + finally: + for i in glob.glob("%s.receiver" % results_path): + f = open("%s.RHS" % results_path, "w") + raw = " buf(k)| throughput(Mbit/s)" + logging.info(raw) + f.write(raw + "\n") + for j in parse_file(i): + raw = "%8s| %8s" % (j[0], j[1]) + logging.info(raw) + f.write(raw + "\n") + f.close() ^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 2/4] virt-test: Refactor netperf test and add analysis module 2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong 2011-12-23 10:28 ` [RFC PATCH 1/4] virt-test: add NTttcp subtests Amos Kong @ 2011-12-23 10:28 ` Amos Kong 2011-12-23 10:28 ` [RFC PATCH 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong ` (3 subsequent siblings) 5 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2011-12-23 10:28 UTC (permalink / raw) To: lmr, wquan, kvm, jasowang, rhod, autotest Always use a VM as netperf server, we can use another VM/localhost/external host as the netperf clients. We setup env and launch test by executing remote ssh commands, you need to configure the IP of local/external host in configure file, VMs' IP can be got automatically. Signed-off-by: Amos Kong <akong@redhat.com> --- client/tests/kvm/subtests.cfg.sample | 36 +++- client/virt/tests/netperf.py | 295 ++++++++++++++++++++++++---------- 2 files changed, 233 insertions(+), 98 deletions(-) diff --git a/client/tests/kvm/subtests.cfg.sample b/client/tests/kvm/subtests.cfg.sample index a05aee8..9beb9f7 100644 --- a/client/tests/kvm/subtests.cfg.sample +++ b/client/tests/kvm/subtests.cfg.sample @@ -951,20 +951,36 @@ variants: - netperf: install setup image_copy unattended_install.cdrom only Linux + only virtio_net type = netperf - nics += ' nic2 nic3 nic4' + kill_vm = yes + image_snapshot = yes + nics += ' nic2' + # nic1 is for control, nic2 is for data connection + # bridge_nic1 = virbr0 + pci_model_nic1 = virtio_net + # bridge_nic2 = switch + pci_model_nic2 = e1000 nic_mode = tap netperf_files = netperf-2.4.5.tar.bz2 wait_before_data.patch - packet_size = 1500 - setup_cmd = "cd %s && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make" - netserver_cmd = %s/netperf-2.4.5/src/netserver + setup_cmd = "cd /tmp && rm -rf netperf-2.4.5 && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make" + # configure netperf test parameters + # l = 60 + # protocols = "TCP_STREAM TCP_MAERTS TCP_RR" + # sessions = "1 2 4" + # sessions_rr = "50 100 250 500" + # sizes = "64 256 512 1024" + # sizes_rr = "64 256 512 1024" variants: - - stream: - netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -m %s - protocols = "TCP_STREAM TCP_MAERTS TCP_SENDFILE UDP_STREAM" - - rr: - netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -r %s - protocols = "TCP_RR TCP_CRR UDP_RR" + - guest_guest: + vms += " vm2" + nics = 'nic1' + - host_guest: + # local host ip address + # client = localhost + - exhost_guest: + # external host ip address + # client = - ntttcp: type = ntttcp diff --git a/client/virt/tests/netperf.py b/client/virt/tests/netperf.py index fea1e9e..9c766bf 100644 --- a/client/virt/tests/netperf.py +++ b/client/virt/tests/netperf.py @@ -1,17 +1,17 @@ -import logging, os, signal +import logging, os, commands, sys, threading, re, glob from autotest_lib.client.common_lib import error from autotest_lib.client.bin import utils from autotest_lib.client.virt import aexpect, virt_utils +from autotest_lib.client.virt import virt_test_utils def run_netperf(test, params, env): """ Network stress test with netperf. - 1) Boot up a VM with multiple nics. - 2) Launch netserver on guest. - 3) Execute multiple netperf clients on host in parallel - with different protocols. - 4) Output the test result. 
+ 1) Boot up VM(s), setup SSH authorization between host + and guest(s)/external host + 2) Prepare the test environment in server/client/host + 3) Execute netperf tests, collect and analyze the results @param test: KVM test object. @param params: Dictionary with the test parameters. @@ -21,86 +21,205 @@ def run_netperf(test, params, env): vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=login_timeout) + server = vm.get_address() + server_ctl = vm.get_address(1) session.close() - session_serial = vm.wait_for_serial_login(timeout=login_timeout) - - netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2") - setup_cmd = params.get("setup_cmd") - - firewall_flush = "iptables -F" - session_serial.cmd_output(firewall_flush) - try: - utils.run("iptables -F") - except Exception: - pass - - for i in params.get("netperf_files").split(): - vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp") - - try: - session_serial.cmd(firewall_flush) - except aexpect.ShellError: - logging.warning("Could not flush firewall rules on guest") - - session_serial.cmd(setup_cmd % "/tmp", timeout=200) - session_serial.cmd(params.get("netserver_cmd") % "/tmp") - - if "tcpdump" in env and env["tcpdump"].is_alive(): - # Stop the background tcpdump process - try: - logging.debug("Stopping the background tcpdump") - env["tcpdump"].close() - except Exception: - pass - - def netperf(i=0): - guest_ip = vm.get_address(i) - logging.info("Netperf_%s: netserver %s" % (i, guest_ip)) - result_file = os.path.join(test.resultsdir, "output_%s_%s" - % (test.iteration, i )) - list_fail = [] - result = open(result_file, "w") - result.write("Netperf test results\n") - - for p in params.get("protocols").split(): - packet_size = params.get("packet_size", "1500") - for size in packet_size.split(): - cmd = params.get("netperf_cmd") % (netperf_dir, p, - guest_ip, size) - logging.info("Netperf_%s: protocol %s" % (i, p)) - try: - netperf_output = utils.system_output(cmd, - retain_output=True) - result.write("%s\n" % netperf_output) - except Exception: - logging.error("Test of protocol %s failed", p) - list_fail.append(p) - - result.close() - if list_fail: - raise error.TestFail("Some netperf tests failed: %s" % - ", ".join(list_fail)) - - try: - logging.info("Setup and run netperf clients on host") - utils.run(setup_cmd % netperf_dir) - - bg = [] - nic_num = len(params.get("nics").split()) - for i in range(nic_num): - bg.append(virt_utils.Thread(netperf, (i,))) - bg[i].start() - - completed = False - while not completed: - completed = True - for b in bg: - if b.isAlive(): - completed = False - finally: - try: - for b in bg: - if b: - b.join() - finally: - session_serial.cmd_output("killall netserver") + + if "vm2" in params["vms"]: + vm2 = env.get_vm("vm2") + vm2.verify_alive() + session2 = vm2.wait_for_login(timeout=login_timeout) + client = vm2.get_address() + session2.close() + + if params.get("client"): + client = params["client"] + if params.get("host"): + host = params["host"] + else: + cmd = "ifconfig %s|awk 'NR==2 {print $2}'|awk -F: '{print $2}'" + host = commands.getoutput(cmd % params["bridge"]) + + shell_port = params["shell_port"] + password = params["password"] + username = params["username"] + + def env_setup(ip): + logging.debug("Setup env for %s" % ip) + virt_utils.scp_to_remote(ip, shell_port, username, password, + "~/.ssh/id_dsa.pub", "~/.ssh/authorized_keys") + ssh_cmd(ip, "service iptables stop") + + netperf_dir = os.path.join(os.environ['AUTODIR'], 
"tests/netperf2") + for i in params.get("netperf_files").split(): + virt_utils.scp_to_remote(ip, shell_port, username, password, + "%s/%s" % (netperf_dir, i), "/tmp/") + ssh_cmd(ip, params.get("setup_cmd")) + + logging.info("Prepare env of server/client/host") + if not os.path.exists(os.path.expandvars("$HOME/.ssh/id_dsa.pub")): + commands.getoutput('yes ""|ssh-keygen -t dsa -q -N ""') + + #env_setup(server_ctl) + #env_setup(client) + #env_setup(host) + logging.info("Start netperf testing ...") + start_test(server, server_ctl, host, client, test.resultsdir, + l=int(params.get('l')), + sessions_rr=params.get('sessions_rr'), + sessions=params.get('sessions'), + sizes_rr=params.get('sizes_rr'), + sizes=params.get('sizes'), + protocols=params.get('protocols')) + + +def start_test(server, server_ctl, host, client, resultsdir, l=60, + sessions_rr="50 100 250 500", sessions="1 2 4", + sizes_rr="64 256 512 1024 2048", + sizes="64 256 512 1024 2048 4096", + protocols="TCP_STREAM TCP_MAERTS TCP_RR"): + """ + Start to test with different kind of configurations + + @param server: netperf server ip for data connection + @param server_ctl: ip to control netperf server + @param host: localhost ip + @param client: netperf client ip + @param resultsdir: directory to restore the results + @param l: test duration + @param sessions_rr: sessions number list for RR test + @param sessions: sessions number list + @param sizes_rr: request/response sizes (TCP_RR, UDP_RR) + @param sizes: send size (TCP_STREAM, UDP_STREAM) + @param protocols: test type + """ + + def parse_file(file_prefix, raw=""): + """ Parse result files and reture throughput total """ + thu = 0 + for file in glob.glob("%s.*.nf" % file_prefix): + o = commands.getoutput("cat %s |tail -n 1" % file) + try: + thu += float(o.split()[raw]) + except: + logging.debug(commands.getoutput("cat %s.*" % file_prefix)) + return -1 + return thu + + fd = open("%s/netperf-result.RHS" % resultsdir, "w") + for protocol in protocols.split(): + logging.info(protocol) + fd.write(protocol+ "\n") + row = "%8s|%5s|%10s|%6s|%9s|%10s|%10s|%12s|%12s|%9s|%8s|%8s|%10s|%10s" \ + "|%11s|%10s" % ("sessions", "size", "throughput", "cpu", + "normalize", "#tx-pkts", "#rx-pkts", "#tx-byts", "#rx-byts", + "#re-trans", "#tx-intr", "#rx-intr", "#io_exit", "#irq_inj", + "#tpkt/#exit", "#rpkt/#irq") + logging.info(row) + fd.write(row + "\n") + if (protocol == "TCP_RR"): + sessions_test = sessions_rr.split() + sizes_test = sizes_rr.split() + else: + sessions_test = sessions.split() + sizes_test = sizes.split() + for i in sizes_test: + for j in sessions_test: + if (protocol == "TCP_RR"): + ret = launch_client(1, server, server_ctl, host, client, l, + "-t %s -v 0 -P -0 -- -r %s,%s -b %s" % (protocol, i, i, j)) + thu = parse_file("/tmp/netperf.%s" % ret['pid'], 0) + else: + ret = launch_client(j, server, server_ctl, host, client, l, + "-C -c -t %s -- -m %s" % (protocol, i)) + thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4) + cpu = float(ret['mpstat'].split()[10]) + cpu = 100 - cpu + normal = thu / cpu + pkt_rx_irq = float(ret['rx_pkts']) / float(ret['irq_inj']) + pkt_tx_exit = float(ret['tx_pkts']) / float(ret['io_exit']) + row = "%8d|%5d|%10.2f|%6.2f|%9.2f|%10d|%10d|%12d|%12d|%9d" \ + "|%8d|%8d|%10d|%10d|%11.2f|%10.2f" % (int(j), int(i), + thu, cpu, normal, ret['tx_pkts'], ret['rx_pkts'], + ret['tx_byts'], ret['rx_byts'], ret['re_pkts'], + ret['tx_intr'], ret['rx_intr'], ret['io_exit'], + ret['irq_inj'], pkt_tx_exit, pkt_rx_irq) + logging.info(row) + fd.write(row + "\n") + fd.flush() + 
logging.debug("Remove temporary files") + commands.getoutput("rm -f /tmp/netperf.%s.*.nf" % ret['pid']) + fd.close() + + +def ssh_cmd(ip, cmd, user="root"): + """ + Execute remote command and return the output + + @param ip: remote machine IP + @param cmd: executed command + @param user: username + """ + return utils.system_output('ssh -o StrictHostKeyChecking=no -o ' + 'UserKnownHostsFile=/dev/null %s@%s "%s"' % (user, ip, cmd)) + + +def launch_client(sessions, server, server_ctl, host, client, l, nf_args): + """ Launch netperf clients """ + + client_path="/tmp/netperf-2.4.5/src/netperf" + server_path="/tmp/netperf-2.4.5/src/netserver" + ssh_cmd(server_ctl, "pidof netserver || %s" % server_path) + ncpu = ssh_cmd(server_ctl, "cat /proc/cpuinfo |grep processor |wc -l") + + def count_interrupt(name): + """ + @param name: the name of interrupt, such as "virtio0-input" + """ + intr = 0 + stat = ssh_cmd(server_ctl, "cat /proc/interrupts |grep %s" % name) + for cpu in range(int(ncpu)): + intr += int(stat.split()[cpu+1]) + return intr + + def get_state(): + for i in ssh_cmd(server_ctl, "ifconfig").split("\n\n"): + if server in i: + nrx = int(re.findall("RX packets:(\d+)", i)[0]) + ntx = int(re.findall("TX packets:(\d+)", i)[0]) + nrxb = int(re.findall("RX bytes:(\d+)", i)[0]) + ntxb = int(re.findall("TX bytes:(\d+)", i)[0]) + nre = int(ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1" + ).split()[12]) + nrx_intr = count_interrupt("virtio0-input") + ntx_intr = count_interrupt("virtio0-output") + io_exit = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/io_exits")) + irq_inj = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/irq_injections")) + return [nrx, ntx, nrxb, ntxb, nre, nrx_intr, ntx_intr, io_exit, irq_inj] + + def netperf_thread(i): + cmd = "%s -H %s -l %s %s" % (client_path, server, l, nf_args) + output = ssh_cmd(client, cmd) + f = file("/tmp/netperf.%s.%s.nf" % (pid, i), "w") + f.write(output) + f.close() + + start_state = get_state() + pid = str(os.getpid()) + threads = [] + for i in range(int(sessions)): + t = threading.Thread(target=netperf_thread, kwargs={"i": i}) + threads.append(t) + t.start() + ret = {} + ret['pid'] = pid + ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1)) + for t in threads: + t.join() + + end_state = get_state() + items = ['rx_pkts', 'tx_pkts', 'rx_byts', 'tx_byts', 're_pkts', + 'rx_intr', 'tx_intr', 'io_exit', 'irq_inj'] + for i in range(len(items)): + ret[items[i]] = end_state[i] - start_state[i] + return ret ^ permalink raw reply related [flat|nested] 14+ messages in thread
* [RFC PATCH 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node
From: Amos Kong @ 2011-12-23 10:28 UTC
To: lmr, wquan, kvm, jasowang, rhod, autotest

Dynamically check the host hardware and pin the guest vCPU threads,
guest memory and vhost threads to the last NUMA node.

Signed-off-by: Amos Kong <akong@redhat.com>
---
 client/virt/tests/netperf.py |   19 ++++++++++++++++++-
 1 files changed, 18 insertions(+), 1 deletions(-)

diff --git a/client/virt/tests/netperf.py b/client/virt/tests/netperf.py
index 9c766bf..d4153fc 100644
--- a/client/virt/tests/netperf.py
+++ b/client/virt/tests/netperf.py
@@ -25,6 +25,20 @@ def run_netperf(test, params, env):
     server_ctl = vm.get_address(1)
     session.close()
 
+    logging.debug(commands.getoutput("numactl --hardware"))
+    logging.debug(commands.getoutput("numactl --show"))
+    # pin guest vcpus/memory/vhost threads to last numa node of host by default
+    numa_node = int(params.get('numa_node', -1))
+    p = virt_utils.NumaNode(numa_node)
+    node_num = int(p.get_node_num())
+    vhost_threads = commands.getoutput("ps aux |grep '\[vhost-.*\]'|grep -v grep|awk '{print $2}'")
+    for i in vhost_threads.split():
+        logging.debug("pin vhost_net thread(%s) to host cpu node" % i)
+        p.pin_cpu(i)
+    o = vm.monitor.info("cpus")
+    for i in re.findall("thread_id=(\d+)", o):
+        p.pin_cpu(i)
+
     if "vm2" in params["vms"]:
         vm2 = env.get_vm("vm2")
         vm2.verify_alive()
@@ -198,7 +212,10 @@ def launch_client(sessions, server, server_ctl, host, client, l, nf_args):
         return [nrx, ntx, nrxb, ntxb, nre, nrx_intr, ntx_intr, io_exit, irq_inj]
 
     def netperf_thread(i):
-        cmd = "%s -H %s -l %s %s" % (client_path, server, l, nf_args)
+        output = ssh_cmd(client, "numactl --hardware")
+        n = int(re.findall("available: (\d+) nodes", output)[0]) - 1
+        cmd = "numactl --cpunodebind=%s --membind=%s %s -H %s -l %s %s" % \
+              (n, n, client_path, server, l, nf_args)
         output = ssh_cmd(client, cmd)
         f = file("/tmp/netperf.%s.%s.nf" % (pid, i), "w")
         f.write(output)
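For readers unfamiliar with the NumaNode helper used above: conceptually
the pinning amounts to restricting each thread ID to the CPU list of the
chosen node. A rough, stand-alone sketch of the same idea (illustrative
only; this is not the virt_utils.NumaNode implementation, and the
node/CPU discovery is simplified):

    # Illustrative sketch: pin a set of thread IDs to the CPUs of the
    # last NUMA node reported by 'numactl --hardware'.
    import commands, re

    def last_node_cpus():
        out = commands.getoutput("numactl --hardware")
        nodes = int(re.findall("available: (\d+) nodes", out)[0])
        cpus = re.findall("node %d cpus: ([\d ]+)" % (nodes - 1), out)[0]
        return cpus.split()

    def pin_threads(tids):
        cpu_list = ",".join(last_node_cpus())
        for tid in tids:
            # taskset -pc restricts an existing thread/process to the CPUs
            commands.getoutput("taskset -pc %s %s" % (cpu_list, tid))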
* [RFC PATCH 4/4] virt: Introduce regression testing infrastructure 2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong ` (2 preceding siblings ...) 2011-12-23 10:28 ` [RFC PATCH 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong @ 2011-12-23 10:28 ` Amos Kong 2011-12-24 1:13 ` Yang Hamo Bai 2011-12-29 13:12 ` [RFC PATCH 0/4] Network performance regression Amos Kong 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong 5 siblings, 1 reply; 14+ messages in thread From: Amos Kong @ 2011-12-23 10:28 UTC (permalink / raw) To: lmr, wquan, kvm, jasowang, rhod, autotest regression.py: 'regression' module is used to compare the test results of two jobs, we can use it (regression.compare()) at the end of control file, This script can also be used directly. Example: | # python regression.py ntttcp /ntttcp-result1 /ntttcp-result2 \ | ../../tests/kvm/perf.conf | Fri Dec 23 17:23:08 2011 | | 1 - /tmp/netperf-avg-0.385058442362.txt | 2 - /tmp/netperf-avg-0.66384166902.txt | | ======================== | buf(k)| throughput(Mbit/s) | 1 2| 109.548 | 2 2| 104.239 | % | -4.8 | 1 4| 209.519 | 2 4| 211.633 | % | +1.0 analyzer.py: It's used to compare two test results (standard format), it can also be used directly. | # python analyzer.py /result1.RHS /ntttcp-result2.RHS perf.conf: config test related parameters. It supports to compare current result with the result in autotest server. autotest result directory should be shared by NFS first, and specify its address in perf.conf Signed-off-by: Amos Kong <akong@redhat.com> --- client/tests/kvm/control | 7 + client/tests/kvm/perf.conf | 23 ++++ client/virt/tests/analyzer.py | 224 +++++++++++++++++++++++++++++++++++++++ client/virt/tests/regression.py | 33 ++++++ 4 files changed, 287 insertions(+), 0 deletions(-) create mode 100644 client/tests/kvm/perf.conf create mode 100644 client/virt/tests/analyzer.py create mode 100644 client/virt/tests/regression.py diff --git a/client/tests/kvm/control b/client/tests/kvm/control index 950154c..5cdf506 100644 --- a/client/tests/kvm/control +++ b/client/tests/kvm/control @@ -67,3 +67,10 @@ if args: parser.parse_string(str) virt_utils.run_tests(parser, job) + +# compare the perfmance results of job +# from autotest_lib.client.virt.tests import regression +# regression.compare("ntttcp", "$olddir", +# "%s/results/default/" % os.environ['AUTODIR'], +# config_file="%s/tests/kvm/perf.conf" % os.environ['AUTODIR'], +# output_dir="%s/results/default/" % os.environ['AUTODIR']) diff --git a/client/tests/kvm/perf.conf b/client/tests/kvm/perf.conf new file mode 100644 index 0000000..31b72b2 --- /dev/null +++ b/client/tests/kvm/perf.conf @@ -0,0 +1,23 @@ +# this config file is used to set test related parameters +# + +[server] +result_nfs = kvm-autotest.englab.nay.redhat.com:/usr/local/autotest/results +result_mntdir = /results/ + +[ntttcp] +result_dir = results +result_file_pattern = .*.RHS + +[netperf] +result_dir = results +result_file_pattern = netperf-result.RHS + +[iozone] +result_dir = guest_test_results +result_file_pattern = + +[ffsb] +result_dir = results +result_file_pattern = + diff --git a/client/virt/tests/analyzer.py b/client/virt/tests/analyzer.py new file mode 100644 index 0000000..9023c77 --- /dev/null +++ b/client/virt/tests/analyzer.py @@ -0,0 +1,224 @@ +import sys, re, string, time, commands, os, random + +def aton(str): + substring = re.split("\.", str) + if len(substring) == 1: + if substring[0].isdigit(): + return string.atoi(str) + elif len(substring) == 2: + if 
substring[0].isdigit() and substring[1].isdigit(): + return string.atof(str) + return False + +def avg(dict, i): + linetmp = [] + tmp = [] + lines = {} + + filecounts = len(dict) + for j in range(len(dict)): + lines[j] = re.split("\|", dict[j][i]) + for value in range(len(lines[0])): + avgtmp = 0 + column_caculate = 2 + if value < column_caculate: + linetmp.append(lines[0][value]) + else: + space = "" + strlen = len(lines[0][value]) + for i in range(len(lines)): + avgtmp += (aton(lines[i][value].strip())) + if len(re.findall("\.", lines[0][value])) == 0: + avgtmpstr = "%d" % (avgtmp/filecounts) + else: + avgtmpstr = "%.2f" % (avgtmp/filecounts) + + strlenvalue = len(avgtmpstr) + tmplen = strlen-strlenvalue + if value == (len(lines[0])-1): + for v in range(tmplen-1): + space += " " + avgtmpstr= space + avgtmpstr + "\n" + linetmp.append(avgtmpstr) + break + for v in range(tmplen): + space += " " + avgtmpstr = space + avgtmpstr + linetmp.append(avgtmpstr) + line = "|".join(linetmp) + return line + +def avgfile(filenames): + """ + caculate the average of namelist + 1)get the data of every file, then put the data into the dict + 2)caculat the average of the file + """ + filelines = [] + dict = {} + name = "/tmp/netperf-avg-%s.txt" % random.random() + + for i in range(len(filenames)): + fd = open(filenames[i], "r") + dict[i] = fd.readlines() + fd.close() + filenum = len(dict) + if filenum == 1: + content = dict[0] + else: + for i in range(len(dict[0])): + if dict[0][i] == dict[1][i]: + filelines.append(dict[0][i]) + else: + line = avg(dict, i) + filelines.append(line) + content = filelines + f = open(name, "w") + f.write(''.join(content)) + f.close() + return name + +def record_result(name1, name2, file): + + def tee(content): + f = open(file, "a") + f.write(content + "\n") + print content + + result1 = {} + result2 = {} + result3 = {} + row = 0 + strlen = 0 + eachLine = "" + tee(name1) + + # read the first file + fd = open(name1, "r") + for eachLine in fd: + #eachLine = ''.join(eachLine.split()) + eachLine = eachLine.replace('\r', '') + eachLine = eachLine.replace('\n', '') + result1[row] = re.split("\|", eachLine) + row += 1 + + fd.close() + row = 0 + # read the second file + fd = open(name2, "r") + for eachLine in fd: + #eachLine = ''.join(eachLine.split()) + eachLine = eachLine.replace('\r', '') + eachLine = eachLine.replace('\n', '') + if re.findall("sessions", eachLine) != 0: + strlen = len(eachLine) + result2[row] = re.split("\|", eachLine) + row += 1 + + fd.close() + + name1_list = re.split("/", name1) + name2_list = re.split("/", name2) + + len1 = len(name1_list) + file_name11 = name1_list[len1-1] + len2 = len(name2_list) + file_name22 = name2_list[len2-1] + + #rename the file which will save the result + name1list = re.split("-", file_name11) + name2list = re.split("-", file_name22) + if (len(name1list) > len(name2list)): + namelen = len(name2list) + else: + namelen = len(name1list) + + resultlist = [] + for i in range(namelen): + if name1list[i] == name2list[i]: + resultlist.append(name1list[i]) + + timevalue = time.time() + timestring = time.ctime(timevalue) + tee("%s\n" % timestring) + tee("1 - %s" % name1) + tee("2 - %s\n" % name2) + + #caculate the length of each line + eachLine = "" + for i in range(strlen): + eachLine += "=" + eachLine += "======" + tee("%s" % eachLine) + row = strlen = 0 + for row in result1: + if result1[row] == result2[row]: + if len(result1[row]) > 1: + result1[row][0] = " %s" % result1[row][0] + eachLine = "|".join(result1[row]) + tee("%s" % eachLine) + 
else: + eachLine = "|".join(result1[row]) + tee("%s" % eachLine) + else: + strlen = len(result1[row][0]) + tmp = result1[row][0].strip() + tmp = "%s" % tmp + result1[row][0] = tmp.rjust(strlen, ' ') + result1[row][0] = "1 %s" % result1[row][0] + eachLine = "|".join(result1[row]) + tee("%s" % eachLine) + + strlen = len(result2[row][0]) + tmp = result2[row][0].strip() + tmp = "%s" % tmp + result2[row][0] = tmp.rjust(strlen, ' ') + result2[row][0] = "2 %s" % result2[row][0] + eachLine = "|".join(result2[row]) + tee("%s" % eachLine) + + result_tmp = [] + strlen = 0 + result_colum = 1 + for i in range(len(result1[row])): + if i < result_colum: + tmp_str = "" + strlen += len(result1[row][i]) + tmp_str = tmp_str.rjust(strlen-1, ' ') + tmp_str = "%" + tmp_str + if i == result_colum - 1: + result_tmp.append(tmp_str) + elif i >= result_colum: + strlen = len(result1[row][i]) + aa = (result1[row][i]).strip() + aa = string.atof(aa) + bb = (result2[row][i]).strip() + bb = string.atof(bb) + if aa != 0: + cc = ((bb-aa)/aa)*100 + if cc > 0: + result = "+%.1f" % cc + else: + result = "%.1f" % cc + else: + result = "0" + result_str = result.rjust(strlen, ' ') + result_tmp.append(result_str) + + eachLine = "|".join(result_tmp) + tee("%s" % eachLine) + +def analyze(list_files1, list_files2, output_dir=""): + average1 = avgfile(list_files1.split()) + average2 = avgfile(list_files2.split()) + f = os.path.join(output_dir, "end-report-%s.txt" % + time.strftime('%Y-%m-%d-%H.%M.%S')) + record_result(average1, average2, f) + commands.getoutput("rm -f /tmp/netperf-avg-*") + + +if __name__ == "__main__": + if len(sys.argv) < 3: + print 'Usage: python %s "$results list1" "results list2"' % sys.argv[0] + sys.exit(1) + analyze(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/client/virt/tests/regression.py b/client/virt/tests/regression.py new file mode 100644 index 0000000..e2588a7 --- /dev/null +++ b/client/virt/tests/regression.py @@ -0,0 +1,33 @@ +import ConfigParser, sys, commands, os +import analyzer + +def compare(testname, olddir, curdir, config_file='perf.conf', output_dir=""): + config = ConfigParser.ConfigParser() + config.read(config_file) + + result_nfs = config.get("server", "result_nfs") + result_mntdir = config.get("server", "result_mntdir") + result_dir = config.get(testname, "result_dir") + result_file_pattern = config.get(testname, "result_file_pattern") + + def search_files(dir): + cmd = 'find %s|grep %s|grep "%s/%s"' % (dir, + testname, result_dir, result_file_pattern) + return commands.getoutput(cmd) + + if not os.path.isdir(result_mntdir): + os.mkdir(result_mntdir) + commands.getoutput("mount %s %s" % (result_nfs, result_mntdir)) + + if not os.path.isabs(olddir): + olddir = result_mntdir + olddir + oldlist = search_files(olddir) + newlist = search_files(curdir) + if oldlist != "" or newlist != "": + analyzer.analyze(oldlist, newlist, output_dir) + +if __name__ == "__main__": + if len(sys.argv) != 5: + print 'Usage: python %s $testname $dir1 $dir2 $configfile' % sys.argv[0] + sys.exit(1) + compare(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) ^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [RFC PATCH 4/4] virt: Introduce regression testing infrastructure
From: Yang Hamo Bai @ 2011-12-24 1:13 UTC
To: Amos Kong; +Cc: lmr, wquan, kvm, jasowang, rhod, autotest

Hi akong,

see the inline comment.

On Fri, Dec 23, 2011 at 6:28 PM, Amos Kong <akong@redhat.com> wrote:
> [...]
> diff --git a/client/tests/kvm/perf.conf b/client/tests/kvm/perf.conf
> new file mode 100644
> index 0000000..31b72b2
> --- /dev/null
> +++ b/client/tests/kvm/perf.conf
> @@ -0,0 +1,23 @@
> +# this config file is used to set test related parameters
> +#
> +
> +[server]
> +result_nfs = kvm-autotest.englab.nay.redhat.com:/usr/local/autotest/results

Is it suitable to use an internal only address as the default path
for the public repo? If not, why not use a variable and let the user
set it?

> +result_mntdir = /results/
> [...]

Thanks,
Yang
-- 
"""
Keep It Simple,Stupid.
"""

Chinese Name: 白杨
Nick Name: Hamo
Homepage: http://hamobai.com/
GPG KEY ID: 0xA4691A33
Key fingerprint = 09D5 2D78 8E2B 0995 CF8E 4331 33C4 3D24 A469 1A33
* Re: [RFC PATCH 4/4] virt: Introduce regression testing infrastructure
From: Amos Kong @ 2011-12-25 1:26 UTC
To: Yang Hamo Bai; +Cc: wquan, kvm, rhod, autotest

----- Original Message -----
> Hi akong,
>
> see the inline comment.
>
> On Fri, Dec 23, 2011 at 6:28 PM, Amos Kong <akong@redhat.com> wrote:
> > [...]
> > +[server]
> > +result_nfs = kvm-autotest.englab.nay.redhat.com:/usr/local/autotest/results
>
> Is it suitable to use an internal only address as the default path
> for the public repo?
> If not, why not use a variable and let the user set it?

I found this problem when I sent it out ;) will fix it in the next
version. Thanks.

  result_nfs = $autotest_server:/usr/local/autotest/results

> > [...]
* Re: [RFC PATCH 0/4] Network performance regression 2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong ` (3 preceding siblings ...) 2011-12-23 10:28 ` [RFC PATCH 4/4] virt: Introduce regression testing infrastructure Amos Kong @ 2011-12-29 13:12 ` Amos Kong 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong 5 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2011-12-29 13:12 UTC (permalink / raw) To: lmr, wquan, kvm, jasowang, rhod, autotest ----- Original Message ----- > This patchset adds a new network perf testcase for Windows, > refactors old netperf test, and support numa resource control. > Process the raw results to a standard format at the end of test, > then we can compute average and compare with old results. > > Welcome to give feedback, thanks in advance! > > --- > > Amos Kong (4): > virt-test: add NTttcp subtests > virt-test: Refactor netperf test and add analysis module > netperf: pin guest vcpus/memory/vhost thread to numa node > virt: Introduce regression testing infrastructure Hi Lucas, I've improved those four patches and updated them in my repo. Please check the latest code there if you want to review these patches :) git://github.com/kongove/autotest [master branch] Changes: - compute the standard deviation of samples - fix ntttcp / netperf setup bugs - bind numa node for ntttcp - sharpen analysis functions - etc regression.py and analyzer.py also need to be improved; the current regression comparison is not convenient. I will send v2 after resolving the existing problems and improving the stability of the results. Amos. ^ permalink raw reply [flat|nested] 14+ messages in thread
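One of the changes listed above is computing the standard deviation of repeated samples. The v2 analyzer posted later in this thread uses the usual n - 1 sample formula; a minimal sketch of that calculation on made-up throughput values (the numbers and names below are illustrative only, and at least two samples are assumed):

import math

def sample_sd(data):
    """Standard deviation with the n - 1 correction, as in the v2 analyzer.py."""
    n = len(data)
    avg = sum(data) / n
    sqsum = sum(x ** 2 for x in data)
    return math.sqrt((sqsum - n * avg ** 2) / (n - 1))

if __name__ == "__main__":
    runs = [14699.17, 14788.02, 14650.33]      # hypothetical repeated runs
    avg = sum(runs) / len(runs)
    sd = sample_sd(runs)
    print "avg=%.2f sd=%.2f %%sd=%.1f" % (avg, sd, sd / avg * 100)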
* [Autotest PATCH v2 0/4] Network performance regression 2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong ` (4 preceding siblings ...) 2011-12-29 13:12 ` [RFC PATCH 0/4] Network performance regression Amos Kong @ 2012-01-05 3:05 ` Amos Kong 2012-01-05 3:05 ` [Autotest PATCH v2 1/4] virt-test: add NTttcp subtests Amos Kong ` (4 more replies) 5 siblings, 5 replies; 14+ messages in thread From: Amos Kong @ 2012-01-05 3:05 UTC (permalink / raw) To: lmr, autotest; +Cc: kvm This patchset adds a new network perf testcase for Windows, refactors old netperf test, and support numa resource control. Process the raw results to a 'standard format' at the end of test, then we can analyze them with general module, compute average and compare with old results. User can configure test time/repeat times for getting stable results. Welcome to give feedback, thanks in advance! Changes from v1: - refactor analysis module - add new features in analysis code - shape those two tests - fix some script bugs - add autoio script for ntttcp test --- Amos Kong (4): virt-test: add NTttcp subtests virt-test: Refactor netperf test and add analysis module netperf: pin guest vcpus/memory/vhost thread to numa node virt: Introduce regression testing infrastructure client/tests/kvm/control | 7 + client/tests/kvm/perf.conf | 23 +++ client/virt/scripts/ntttcp.au3 | 41 +++++ client/virt/subtests.cfg.sample | 59 ++++++- client/virt/tests/analyzer.py | 172 ++++++++++++++++++++++ client/virt/tests/netperf.py | 312 ++++++++++++++++++++++++++++----------- client/virt/tests/ntttcp.py | 183 +++++++++++++++++++++++ client/virt/tests/regression.py | 34 ++++ 8 files changed, 733 insertions(+), 98 deletions(-) create mode 100644 client/tests/kvm/perf.conf create mode 100755 client/virt/scripts/ntttcp.au3 create mode 100644 client/virt/tests/analyzer.py create mode 100644 client/virt/tests/ntttcp.py create mode 100644 client/virt/tests/regression.py -- Amos Kong ^ permalink raw reply [flat|nested] 14+ messages in thread
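The 'standard format' mentioned above is a plain-text table whose columns are separated by '|' and whose non-numeric line is the title row (see the .RHS examples in the patches below). A small sketch of how such a file could be read back by a generic analysis module; the file name and helper function are hypothetical and not part of this series:

def read_rhs(path):
    """Parse a '|'-separated result table into (title row, numeric rows)."""
    title = None
    rows = []
    for line in open(path):
        fields = [f.strip() for f in line.rstrip("\n").split("|")]
        if len(fields) < 2:
            continue                    # protocol names, blank lines, etc.
        try:
            rows.append([float(f) for f in fields])
        except ValueError:
            title = fields              # non-numeric line is the title row
    return title, rows

if __name__ == "__main__":
    # 'raw_output_1.RHS' is a hypothetical result file, e.g.:
    #   buf(k)| throughput(Mbit/s)
    #        2|             109.548
    #        4|             209.519
    title, rows = read_rhs("raw_output_1.RHS")
    print title
    for r in rows:
        print r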
* [Autotest PATCH v2 1/4] virt-test: add NTttcp subtests 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong @ 2012-01-05 3:05 ` Amos Kong 2012-01-05 3:06 ` [Autotest PATCH v2 2/4] virt-test: Refactor netperf test and add analysis module Amos Kong ` (3 subsequent siblings) 4 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2012-01-05 3:05 UTC (permalink / raw) To: lmr, autotest; +Cc: kvm This case will test tcp throughput between 2 windows guests, or between 1 guest and 1 external Windows host. When test between guest and external Windows host, 'receiver_address' should be set to external Windows' ip address. ! NTttcp package can be downloaded from this link: ! http://msdn.microsoft.com/en-us/windows/hardware/gg463264 ! ntttcp.au3: This script will sign "End-user license agreement" ! for you, please don't use this script if you don't agree EULA. ! ! Create a directory in winutils.iso and put msi and autoit script ! to it. ! winutils.iso: ! NTttcp/ ! NTttcp/NT Testing TCP Tool.msi ! NTttcp/ntttcp.au3 This test will generate result files with 'standard' format, split different items by '|', use one line as the title. We can analyze them by a general modules. raw_output_1.RHS: buf(k)| throughput(Mbit/s) ... 64| 2407.548 128| 2102.254 256| 4930.362 512| 4723.035 1024| 4725.334 Changes from v1: - pin vcpus/vhost_net threads to numa node - add autoio script for ntttcp test - user should put msi and autoit script to iso - fix threads sync issue - set test time to 30 seconds Signed-off-by: Qingtang Zhou <qzhou@redhat.com> Signed-off-by: Amos Kong <akong@redhat.com> --- client/virt/scripts/ntttcp.au3 | 41 +++++++++ client/virt/subtests.cfg.sample | 22 +++++ client/virt/tests/ntttcp.py | 183 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 246 insertions(+), 0 deletions(-) create mode 100755 client/virt/scripts/ntttcp.au3 create mode 100644 client/virt/tests/ntttcp.py diff --git a/client/virt/scripts/ntttcp.au3 b/client/virt/scripts/ntttcp.au3 new file mode 100755 index 0000000..00489e8 --- /dev/null +++ b/client/virt/scripts/ntttcp.au3 @@ -0,0 +1,41 @@ +#cs --------------------------------------------- +AutoIt Version: 3.1.1.0 +Author: Qingtang Zhou <qzhou@redhat.com> + +Script Function: +Install NT Testing TCP tool + +Note: This script will sign "End-user license agreement" for user +#ce --------------------------------------------- + +Func WaitWind($title) + WinWait($title, "") + + If Not WinActive($title, "") Then + WinActivate($title, "") + EndIf +EndFunc + +$FILE="msiexec /i ""D:\NTttcp\\NT Testing TCP Tool.msi""" +Run($FILE) + +WaitWind("NT Testing TCP Tool") +WinWaitActive("NT Testing TCP Tool", "Welcome to the NT Testing TCP Tool Setup Wizard") +Send("!n") + +WaitWind("NT Testing TCP Tool") +WinWaitActive("NT Testing TCP Tool", "License Agreement") +send("!a") +send("{ENTER}") + +WaitWind("NT Testing TCP Tool") +WinWaitActive("NT Testing TCP Tool", "Select Installation Folder") +Send("{ENTER}") + +WaitWind("NT Testing TCP Tool") +WinWaitActive("NT Testing TCP Tool", "Confirm Installation") +send("{ENTER}") + +WinWaitActive("NT Testing TCP Tool", "Installation Complete") +send("!c") + diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample index 89dda8c..5b41f67 100644 --- a/client/virt/subtests.cfg.sample +++ b/client/virt/subtests.cfg.sample @@ -1007,6 +1007,28 @@ variants: netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -r %s protocols = "TCP_RR TCP_CRR UDP_RR" + - ntttcp: + type = ntttcp + image_snapshot = yes + check_ntttcp_cmd = 
"cmd /c dir C:\NTttcp" + ntttcp_sender_cmd = "cmd /c C:\NTttcp\NTttcps.exe -m %s,0,%s -a 2 -l %s -t 30" + ntttcp_receiver_cmd = "cmd /c C:\NTttcp\NTttcpr.exe -m %s,0,%s -a 6 -rb 256k -t 30" + session_num = 1 + buffers = "2k 4k 8k 16k 32k 64k 128k 256k" + timeout = 300 + kill_vm = yes + numa_node = -1 + variants: + - guest_guest: + vms += " vm2" + - guest_host: + # external Windows system IP, NTttcp need to be installed firstly. + receiver_address = "192.168.1.1" + 32: + ntttcp_install_cmd = 'cmd /c "D:\autoit3.exe D:\NTttcp\NTttcp.au3 && mkdir C:\NTttcp && copy "C:\Program Files\Microsoft Corporation\NT Testing TCP Tool\*" C:\NTttcp && cd C:\NTttcp\ && copy NTttcp_%s.exe NTttcps.exe && copy NTttcp_%s.exe NTttcpr.exe"' + 64: + ntttcp_install_cmd = 'cmd /c "D:\autoit3.exe D:\NTttcp\NTttcp.au3 && mkdir C:\NTttcp && copy "C:\Program Files (x86)\Microsoft Corporation\NT Testing TCP Tool\*" C:\NTttcp && cd C:\NTttcp\ && copy NTttcp_%s.exe NTttcps.exe && copy NTttcp_%s.exe NTttcpr.exe"' + - ethtool: install setup image_copy unattended_install.cdrom only Linux type = ethtool diff --git a/client/virt/tests/ntttcp.py b/client/virt/tests/ntttcp.py new file mode 100644 index 0000000..188e0cf --- /dev/null +++ b/client/virt/tests/ntttcp.py @@ -0,0 +1,183 @@ +import logging, os, glob, re, commands +from autotest_lib.client.common_lib import error +from autotest_lib.client.virt import virt_utils + +_receiver_ready = False + +def run_ntttcp(test, params, env): + """ + Run NTttcp on Windows guest + + 1) Install NTttcp in server/client side by Autoit + 2) Start NTttcp in server/client side + 3) Get test results + + @param test: kvm test object + @param params: Dictionary with the test parameters + @param env: Dictionary with test environment. + """ + login_timeout = int(params.get("login_timeout", 360)) + timeout = int(params.get("timeout")) + results_path = os.path.join(test.resultsdir, + 'raw_output_%s' % test.iteration) + if params.get("platform") == "64": + platform = "x64" + else: + platform = "x86" + buffers = params.get("buffers").split() + session_num = params.get("session_num") + + vm_sender = env.get_vm(params["main_vm"]) + vm_sender.verify_alive() + vm_receiver = None + receiver_addr = params.get("receiver_address") + + logging.debug(commands.getoutput("numactl --hardware")) + logging.debug(commands.getoutput("numactl --show")) + # pin guest vcpus/memory/vhost threads to last numa node of host by default + numa_node = int(params.get('numa_node', -1)) + p = virt_utils.NumaNode(numa_node) + node_num = int(p.get_node_num()) + + if not receiver_addr: + vm_receiver = env.get_vm("vm2") + vm_receiver.verify_alive() + try: + sess = None + sess = vm_receiver.wait_for_login(timeout=login_timeout) + receiver_addr = vm_receiver.get_address() + if not receiver_addr: + raise error.TestError("Can't get receiver(%s) ip address" % + vm_sender.name) + o = vm_receiver.monitor.info("cpus") + for i in re.findall("thread_id=(\d+)", o): + p.pin_cpu(i) + finally: + if sess: + sess.close() + + vhost_threads = commands.getoutput("ps aux |grep '\[vhost-.*\]'|" + "grep -v grep|awk '{print $2}'") + for i in vhost_threads.split(): + logging.debug("pin vhost_net thread(%s) to host cpu node" % i) + p.pin_cpu(i) + o = vm_sender.monitor.info("cpus") + for i in re.findall("thread_id=(\d+)", o): + p.pin_cpu(i) + + def install_ntttcp(session): + """ Install ntttcp through a remote session """ + logging.info("Installing NTttcp ...") + if session.cmd_status(params.get("check_ntttcp_cmd")) == 0: + # Don't install ntttcp if it's already 
installed + logging.info("NTttcp directory already exists") + return + ntttcp_install_cmd = params.get("ntttcp_install_cmd") + ret, output = session.cmd_status_output(ntttcp_install_cmd % + (platform, platform), timeout=200) + if ret != 0: + logging.error(output) + raise error.TestError("Can't install NTttcp on guest") + + def receiver(): + """ Receive side """ + logging.info("Starting receiver process on %s", receiver_addr) + if vm_receiver: + session = vm_receiver.wait_for_login(timeout=login_timeout) + else: + username = params.get("username", "") + password = params.get("password", "") + prompt = params.get("shell_prompt", "[\#\$]") + linesep = eval("'%s'" % params.get("shell_linesep", r"\n")) + client = params.get("shell_client") + port = int(params.get("shell_port")) + log_filename = ("session-%s-%s.log" % (receiver_addr, + virt_utils.generate_random_string(4))) + session = virt_utils.remote_login(client, receiver_addr, port, + username, password, prompt, + linesep, log_filename, timeout) + session.set_status_test_command("echo %errorlevel%") + install_ntttcp(session) + ntttcp_receiver_cmd = params.get("ntttcp_receiver_cmd") + global _receiver_ready + f = open(results_path + ".receiver", 'a') + for b in buffers: + virt_utils.wait_for(lambda: not _wait(), timeout) + _receiver_ready = True + cmd = ntttcp_receiver_cmd % (session_num, receiver_addr) + r = session.cmd_output(cmd, timeout=timeout, + print_func=logging.debug) + f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + f.close() + session.close() + + def _wait(): + """ Check if receiver is ready """ + global _receiver_ready + if _receiver_ready: + return _receiver_ready + return False + + def sender(): + """ Send side """ + logging.info("Sarting sender process ...") + session = vm_sender.wait_for_login(timeout=login_timeout) + install_ntttcp(session) + ntttcp_sender_cmd = params.get("ntttcp_sender_cmd") + f = open(results_path + ".sender", 'a') + try: + global _receiver_ready + for b in buffers: + cmd = ntttcp_sender_cmd % (session_num, receiver_addr, b) + # Wait until receiver ready + virt_utils.wait_for(_wait, timeout) + r = session.cmd_output(cmd, timeout=timeout, + print_func=logging.debug) + _receiver_ready = False + f.write("Send buffer size: %s\n%s\n%s" % (b, cmd, r)) + finally: + f.close() + session.close() + + def parse_file(resultfile): + """ Parse raw result files and generate files with standard format """ + file = open(resultfile, "r") + list= [] + found = False + for line in file.readlines(): + o = re.findall("Send buffer size: (\d+)", line) + if o: + buffer = o[0] + if "Total Throughput(Mbit/s)" in line: + found = True + if found: + fields = line.split() + if len(fields) == 0: + continue + try: + [float(i) for i in fields] + list.append([buffer, fields[-1]]) + except ValueError: + continue + found = False + return list + + try: + bg = virt_utils.Thread(receiver, ()) + bg.start() + if bg.is_alive(): + sender() + bg.join(suppress_exception=True) + else: + raise error.TestError("Can't start backgroud receiver thread") + finally: + for i in glob.glob("%s.receiver" % results_path): + f = open("%s.RHS" % results_path, "w") + raw = " buf(k)| throughput(Mbit/s)" + logging.info(raw) + f.write(raw + "\n") + for j in parse_file(i): + raw = "%8s| %8s" % (j[0], j[1]) + logging.info(raw) + f.write(raw + "\n") + f.close() ^ permalink raw reply related [flat|nested] 14+ messages in thread
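In the ntttcp test above, the sender and receiver threads take turns through the module-level _receiver_ready flag polled with virt_utils.wait_for. The same ping-pong handshake can be expressed with two threading.Event objects; the sketch below is an Event-based rendering of that pattern for illustration, not the mechanism the patch itself uses, and the actual NTttcp command invocations are left out:

import threading, time

receiver_ready = threading.Event()
sender_done = threading.Event()
sender_done.set()                      # allow the receiver to start round 1

def receiver(buffers):
    for b in buffers:
        sender_done.wait()             # previous round has finished
        sender_done.clear()
        # ... start NTttcpr.exe in the real test ...
        receiver_ready.set()           # tell the sender it may start

def sender(buffers):
    for b in buffers:
        receiver_ready.wait()          # receiver is listening
        receiver_ready.clear()
        # ... run NTttcps.exe with buffer size b in the real test ...
        time.sleep(0.1)                # placeholder for the actual run
        sender_done.set()              # unblock the receiver's next round

if __name__ == "__main__":
    bufs = "2k 4k 8k".split()
    t = threading.Thread(target=receiver, args=(bufs,))
    t.start()
    sender(bufs)
    t.join()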
* [Autotest PATCH v2 2/4] virt-test: Refactor netperf test and add analysis module 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong 2012-01-05 3:05 ` [Autotest PATCH v2 1/4] virt-test: add NTttcp subtests Amos Kong @ 2012-01-05 3:06 ` Amos Kong 2012-01-05 3:06 ` [Autotest PATCH v2 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong ` (2 subsequent siblings) 4 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2012-01-05 3:06 UTC (permalink / raw) To: lmr, autotest; +Cc: kvm Always use a VM as netperf server, we can use another VM/localhost/external host as the netperf clients. We setup env and launch test by executing remote ssh commands, you need to configure the IP of local/external host in configure file, VMs' IP can be got automatically. Generate a file with 'standard' format at the end of test, then we can analyze them by general module. Changes from v1: - record packet bytes - enable arp_ignore - get packet info from ifconfig - shape functions - don't change ssh config Signed-off-by: Amos Kong <akong@redhat.com> --- client/virt/subtests.cfg.sample | 36 +++-- client/virt/tests/netperf.py | 295 +++++++++++++++++++++++++++------------ 2 files changed, 233 insertions(+), 98 deletions(-) diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample index 5b41f67..fc2621d 100644 --- a/client/virt/subtests.cfg.sample +++ b/client/virt/subtests.cfg.sample @@ -992,20 +992,36 @@ variants: - netperf: install setup image_copy unattended_install.cdrom only Linux + only virtio_net type = netperf - nics += ' nic2 nic3 nic4' + kill_vm = yes + image_snapshot = yes + nics += ' nic2' + # nic1 is for control, nic2 is for data connection + # bridge_nic1 = virbr0 + pci_model_nic1 = virtio_net + # bridge_nic2 = switch + pci_model_nic2 = e1000 nic_mode = tap netperf_files = netperf-2.4.5.tar.bz2 wait_before_data.patch - packet_size = 1500 - setup_cmd = "cd %s && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make" - netserver_cmd = %s/netperf-2.4.5/src/netserver + setup_cmd = "cd /tmp && rm -rf netperf-2.4.5 && tar xvfj netperf-2.4.5.tar.bz2 && cd netperf-2.4.5 && patch -p0 < ../wait_before_data.patch && ./configure && make" + # configure netperf test parameters + # l = 60 + # protocols = "TCP_STREAM TCP_MAERTS TCP_RR" + # sessions = "1 2 4" + # sessions_rr = "50 100 250 500" + # sizes = "64 256 512 1024" + # sizes_rr = "64 256 512 1024" variants: - - stream: - netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -m %s - protocols = "TCP_STREAM TCP_MAERTS TCP_SENDFILE UDP_STREAM" - - rr: - netperf_cmd = %s/netperf-2.4.5/src/netperf -t %s -H %s -l 60 -- -r %s - protocols = "TCP_RR TCP_CRR UDP_RR" + - guest_guest: + vms += " vm2" + nics = 'nic1' + - host_guest: + # local host ip address + # client = localhost + - exhost_guest: + # external host ip address + # client = - ntttcp: type = ntttcp diff --git a/client/virt/tests/netperf.py b/client/virt/tests/netperf.py index fea1e9e..fd037df 100644 --- a/client/virt/tests/netperf.py +++ b/client/virt/tests/netperf.py @@ -1,17 +1,17 @@ -import logging, os, signal +import logging, os, commands, sys, threading, re, glob from autotest_lib.client.common_lib import error from autotest_lib.client.bin import utils from autotest_lib.client.virt import aexpect, virt_utils +from autotest_lib.client.virt import virt_test_utils def run_netperf(test, params, env): """ Network stress test with netperf. - 1) Boot up a VM with multiple nics. 
- 2) Launch netserver on guest. - 3) Execute multiple netperf clients on host in parallel - with different protocols. - 4) Output the test result. + 1) Boot up VM(s), setup SSH authorization between host + and guest(s)/external host + 2) Prepare the test environment in server/client/host + 3) Execute netperf tests, collect and analyze the results @param test: KVM test object. @param params: Dictionary with the test parameters. @@ -21,86 +21,205 @@ def run_netperf(test, params, env): vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=login_timeout) + server = vm.get_address() + server_ctl = vm.get_address(1) session.close() - session_serial = vm.wait_for_serial_login(timeout=login_timeout) - - netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2") - setup_cmd = params.get("setup_cmd") - - firewall_flush = "iptables -F" - session_serial.cmd_output(firewall_flush) - try: - utils.run("iptables -F") - except Exception: - pass - - for i in params.get("netperf_files").split(): - vm.copy_files_to(os.path.join(netperf_dir, i), "/tmp") - - try: - session_serial.cmd(firewall_flush) - except aexpect.ShellError: - logging.warning("Could not flush firewall rules on guest") - - session_serial.cmd(setup_cmd % "/tmp", timeout=200) - session_serial.cmd(params.get("netserver_cmd") % "/tmp") - - if "tcpdump" in env and env["tcpdump"].is_alive(): - # Stop the background tcpdump process - try: - logging.debug("Stopping the background tcpdump") - env["tcpdump"].close() - except Exception: - pass - - def netperf(i=0): - guest_ip = vm.get_address(i) - logging.info("Netperf_%s: netserver %s" % (i, guest_ip)) - result_file = os.path.join(test.resultsdir, "output_%s_%s" - % (test.iteration, i )) - list_fail = [] - result = open(result_file, "w") - result.write("Netperf test results\n") - - for p in params.get("protocols").split(): - packet_size = params.get("packet_size", "1500") - for size in packet_size.split(): - cmd = params.get("netperf_cmd") % (netperf_dir, p, - guest_ip, size) - logging.info("Netperf_%s: protocol %s" % (i, p)) - try: - netperf_output = utils.system_output(cmd, - retain_output=True) - result.write("%s\n" % netperf_output) - except Exception: - logging.error("Test of protocol %s failed", p) - list_fail.append(p) - - result.close() - if list_fail: - raise error.TestFail("Some netperf tests failed: %s" % - ", ".join(list_fail)) - - try: - logging.info("Setup and run netperf clients on host") - utils.run(setup_cmd % netperf_dir) - - bg = [] - nic_num = len(params.get("nics").split()) - for i in range(nic_num): - bg.append(virt_utils.Thread(netperf, (i,))) - bg[i].start() - - completed = False - while not completed: - completed = True - for b in bg: - if b.isAlive(): - completed = False - finally: - try: - for b in bg: - if b: - b.join() - finally: - session_serial.cmd_output("killall netserver") + + if "vm2" in params["vms"]: + vm2 = env.get_vm("vm2") + vm2.verify_alive() + session2 = vm2.wait_for_login(timeout=login_timeout) + client = vm2.get_address() + session2.close() + + if params.get("client"): + client = params["client"] + if params.get("host"): + host = params["host"] + else: + cmd = "ifconfig %s|awk 'NR==2 {print $2}'|awk -F: '{print $2}'" + host = commands.getoutput(cmd % params["bridge"]) + + shell_port = params["shell_port"] + password = params["password"] + username = params["username"] + + def env_setup(ip): + logging.debug("Setup env for %s" % ip) + virt_utils.scp_to_remote(ip, shell_port, username, password, + 
"~/.ssh/id_dsa.pub", "~/.ssh/authorized_keys") + ssh_cmd(ip, "service iptables stop") + ssh_cmd(ip, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore") + + netperf_dir = os.path.join(os.environ['AUTODIR'], "tests/netperf2") + for i in params.get("netperf_files").split(): + virt_utils.scp_to_remote(ip, shell_port, username, password, + "%s/%s" % (netperf_dir, i), "/tmp/") + ssh_cmd(ip, params.get("setup_cmd")) + + logging.info("Prepare env of server/client/host") + if not os.path.exists(os.path.expandvars("$HOME/.ssh/id_dsa.pub")): + commands.getoutput('yes ""|ssh-keygen -t dsa -q -N ""') + + env_setup(server_ctl) + env_setup(client) + env_setup(host) + logging.info("Start netperf testing ...") + start_test(server, server_ctl, host, client, test.resultsdir, + l=int(params.get('l')), + sessions_rr=params.get('sessions_rr'), + sessions=params.get('sessions'), + sizes_rr=params.get('sizes_rr'), + sizes=params.get('sizes'), + protocols=params.get('protocols')) + + +def start_test(server, server_ctl, host, client, resultsdir, l=60, + sessions_rr="50 100 250 500", sessions="1 2 4", + sizes_rr="64 256 512 1024 2048", + sizes="64 256 512 1024 2048 4096", + protocols="TCP_STREAM TCP_MAERTS TCP_RR"): + """ + Start to test with different kind of configurations + + @param server: netperf server ip for data connection + @param server_ctl: ip to control netperf server + @param host: localhost ip + @param client: netperf client ip + @param resultsdir: directory to restore the results + @param l: test duration + @param sessions_rr: sessions number list for RR test + @param sessions: sessions number list + @param sizes_rr: request/response sizes (TCP_RR, UDP_RR) + @param sizes: send size (TCP_STREAM, UDP_STREAM) + @param protocols: test type + """ + + def parse_file(file_prefix, raw=""): + """ Parse result files and reture throughput total """ + thu = 0 + for file in glob.glob("%s.*.nf" % file_prefix): + o = commands.getoutput("cat %s |tail -n 1" % file) + try: + thu += float(o.split()[raw]) + except: + logging.debug(commands.getoutput("cat %s.*" % file_prefix)) + return -1 + return thu + + fd = open("%s/netperf-result.RHS" % resultsdir, "w") + for protocol in protocols.split(): + logging.info(protocol) + fd.write(protocol+ "\n") + row = "%5s|%8s|%10s|%6s|%9s|%10s|%10s|%12s|%12s|%9s|%8s|%8s|%10s|%10s" \ + "|%11s|%10s" % ("size", "sessions", "throughput", "cpu", + "normalize", "#tx-pkts", "#rx-pkts", "#tx-byts", "#rx-byts", + "#re-trans", "#tx-intr", "#rx-intr", "#io_exit", "#irq_inj", + "#tpkt/#exit", "#rpkt/#irq") + logging.info(row) + fd.write(row + "\n") + if (protocol == "TCP_RR"): + sessions_test = sessions_rr.split() + sizes_test = sizes_rr.split() + else: + sessions_test = sessions.split() + sizes_test = sizes.split() + for i in sizes_test: + for j in sessions_test: + if (protocol == "TCP_RR"): + ret = launch_client(1, server, server_ctl, host, client, l, + "-t %s -v 0 -P -0 -- -r %s,%s -b %s" % (protocol, i, i, j)) + thu = parse_file("/tmp/netperf.%s" % ret['pid'], 0) + else: + ret = launch_client(j, server, server_ctl, host, client, l, + "-C -c -t %s -- -m %s" % (protocol, i)) + thu = parse_file("/tmp/netperf.%s" % ret['pid'], 4) + cpu = 100 - float(ret['mpstat'].split()[10]) + normal = thu / cpu + pkt_rx_irq = float(ret['rx_pkts']) / float(ret['irq_inj']) + pkt_tx_exit = float(ret['tx_pkts']) / float(ret['io_exit']) + row = "%5d|%8d|%10.2f|%6.2f|%9.2f|%10d|%10d|%12d|%12d|%9d" \ + "|%8d|%8d|%10d|%10d|%11.2f|%10.2f" % (int(i), int(j), + thu, cpu, normal, ret['tx_pkts'], ret['rx_pkts'], + 
ret['tx_byts'], ret['rx_byts'], ret['re_pkts'], + ret['tx_intr'], ret['rx_intr'], ret['io_exit'], + ret['irq_inj'], pkt_tx_exit, pkt_rx_irq) + logging.info(row) + fd.write(row + "\n") + fd.flush() + logging.debug("Remove temporary files") + commands.getoutput("rm -f /tmp/netperf.%s.*.nf" % ret['pid']) + fd.close() + + +def ssh_cmd(ip, cmd, user="root"): + """ + Execute remote command and return the output + + @param ip: remote machine IP + @param cmd: executed command + @param user: username + """ + return utils.system_output('ssh -o StrictHostKeyChecking=no -o ' + 'UserKnownHostsFile=/dev/null %s@%s "%s"' % (user, ip, cmd)) + + +def launch_client(sessions, server, server_ctl, host, client, l, nf_args): + """ Launch netperf clients """ + + client_path="/tmp/netperf-2.4.5/src/netperf" + server_path="/tmp/netperf-2.4.5/src/netserver" + ssh_cmd(server_ctl, "pidof netserver || %s" % server_path) + ncpu = ssh_cmd(server_ctl, "cat /proc/cpuinfo |grep processor |wc -l") + + def count_interrupt(name): + """ + @param name: the name of interrupt, such as "virtio0-input" + """ + intr = 0 + stat = ssh_cmd(server_ctl, "cat /proc/interrupts |grep %s" % name) + for cpu in range(int(ncpu)): + intr += int(stat.split()[cpu+1]) + return intr + + def get_state(): + for i in ssh_cmd(server_ctl, "ifconfig").split("\n\n"): + if server in i: + nrx = int(re.findall("RX packets:(\d+)", i)[0]) + ntx = int(re.findall("TX packets:(\d+)", i)[0]) + nrxb = int(re.findall("RX bytes:(\d+)", i)[0]) + ntxb = int(re.findall("TX bytes:(\d+)", i)[0]) + nre = int(ssh_cmd(server_ctl, "grep Tcp /proc/net/snmp|tail -1" + ).split()[12]) + nrx_intr = count_interrupt("virtio0-input") + ntx_intr = count_interrupt("virtio0-output") + io_exit = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/io_exits")) + irq_inj = int(ssh_cmd(host, "cat /sys/kernel/debug/kvm/irq_injections")) + return [nrx, ntx, nrxb, ntxb, nre, nrx_intr, ntx_intr, io_exit, irq_inj] + + def netperf_thread(i): + cmd = "%s -H %s -l %s %s" % (client_path, server, l, nf_args) + output = ssh_cmd(client, cmd) + f = file("/tmp/netperf.%s.%s.nf" % (pid, i), "w") + f.write(output) + f.close() + + start_state = get_state() + pid = str(os.getpid()) + threads = [] + for i in range(int(sessions)): + t = threading.Thread(target=netperf_thread, kwargs={"i": i}) + threads.append(t) + t.start() + ret = {} + ret['pid'] = pid + ret['mpstat'] = ssh_cmd(host, "mpstat 1 %d |tail -n 1" % (l - 1)) + for t in threads: + t.join() + + end_state = get_state() + items = ['rx_pkts', 'tx_pkts', 'rx_byts', 'tx_byts', 're_pkts', + 'rx_intr', 'tx_intr', 'io_exit', 'irq_inj'] + for i in range(len(items)): + ret[items[i]] = end_state[i] - start_state[i] + return ret ^ permalink raw reply related [flat|nested] 14+ messages in thread
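launch_client() above gathers per-run statistics by snapshotting guest and host counters before the netperf clients start and again after they finish, then reporting the differences. A reduced sketch of that snapshot/delta pattern, reading local /proc/net/dev counters instead of the ssh_cmd() calls used by the patch; the interface name and the workload are placeholders:

import re

def read_counters(ifname="eth0"):
    """Snapshot RX/TX packet counters for one interface from /proc/net/dev."""
    for line in open("/proc/net/dev"):
        if ifname + ":" in line:
            fields = re.split("[: ]+", line.strip())
            # layout after the name: rx bytes, rx packets, ..., tx bytes, tx packets, ...
            return {"rx_pkts": int(fields[2]), "tx_pkts": int(fields[10])}
    return {"rx_pkts": 0, "tx_pkts": 0}

def run_with_deltas(workload, ifname="eth0"):
    start = read_counters(ifname)
    workload()                         # e.g. launch the netperf client threads
    end = read_counters(ifname)
    return dict((k, end[k] - start[k]) for k in start)

if __name__ == "__main__":
    import time
    deltas = run_with_deltas(lambda: time.sleep(1))
    print deltas                       # packets seen on eth0 during the run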
* [Autotest PATCH v2 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong 2012-01-05 3:05 ` [Autotest PATCH v2 1/4] virt-test: add NTttcp subtests Amos Kong 2012-01-05 3:06 ` [Autotest PATCH v2 2/4] virt-test: Refactor netperf test and add analysis module Amos Kong @ 2012-01-05 3:06 ` Amos Kong 2012-01-05 3:06 ` [Autotest PATCH v2 4/4] virt: Introduce regression testing infrastructure Amos Kong 2012-01-06 20:17 ` [Autotest PATCH v2 0/4] Network performance regression Lucas Meneghel Rodrigues 4 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2012-01-05 3:06 UTC (permalink / raw) To: lmr, autotest; +Cc: kvm Dynamically checking hardware and pin guest cpu threads and guest memory to last numa node Changes from v1: - assign numanode to -1 for netperf test Signed-off-by: Amos Kong <akong@redhat.com> --- client/virt/subtests.cfg.sample | 1 + client/virt/tests/netperf.py | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletions(-) diff --git a/client/virt/subtests.cfg.sample b/client/virt/subtests.cfg.sample index fc2621d..7687887 100644 --- a/client/virt/subtests.cfg.sample +++ b/client/virt/subtests.cfg.sample @@ -1012,6 +1012,7 @@ variants: # sessions_rr = "50 100 250 500" # sizes = "64 256 512 1024" # sizes_rr = "64 256 512 1024" + numa_node = -1 variants: - guest_guest: vms += " vm2" diff --git a/client/virt/tests/netperf.py b/client/virt/tests/netperf.py index fd037df..7a27a93 100644 --- a/client/virt/tests/netperf.py +++ b/client/virt/tests/netperf.py @@ -25,6 +25,20 @@ def run_netperf(test, params, env): server_ctl = vm.get_address(1) session.close() + logging.debug(commands.getoutput("numactl --hardware")) + logging.debug(commands.getoutput("numactl --show")) + # pin guest vcpus/memory/vhost threads to last numa node of host by default + numa_node = int(params.get('numa_node', -1)) + p = virt_utils.NumaNode(numa_node) + node_num = int(p.get_node_num()) + vhost_threads = commands.getoutput("ps aux |grep '\[vhost-.*\]'|grep -v grep|awk '{print $2}'") + for i in vhost_threads.split(): + logging.debug("pin vhost_net thread(%s) to host cpu node" % i) + p.pin_cpu(i) + o = vm.monitor.info("cpus") + for i in re.findall("thread_id=(\d+)", o): + p.pin_cpu(i) + if "vm2" in params["vms"]: vm2 = env.get_vm("vm2") vm2.verify_alive() @@ -198,7 +212,10 @@ def launch_client(sessions, server, server_ctl, host, client, l, nf_args): return [nrx, ntx, nrxb, ntxb, nre, nrx_intr, ntx_intr, io_exit, irq_inj] def netperf_thread(i): - cmd = "%s -H %s -l %s %s" % (client_path, server, l, nf_args) + output = ssh_cmd(client, "numactl --hardware") + n = int(re.findall("available: (\d+) nodes", output)[0]) - 1 + cmd = "numactl --cpunodebind=%s --membind=%s %s -H %s -l %s %s" % \ + (n, n, client_path, server, l, nf_args) output = ssh_cmd(client, cmd) f = file("/tmp/netperf.%s.%s.nf" % (pid, i), "w") f.write(output) ^ permalink raw reply related [flat|nested] 14+ messages in thread
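The pinning above goes through virt_utils.NumaNode.pin_cpu(), which binds individual vcpu and vhost thread IDs to the chosen host node. As a rough standalone illustration of what such pinning amounts to, the sketch below reads a node's cpulist from sysfs and applies it with taskset; note it binds a task to the whole node rather than to a single CPU, so it only approximates what the autotest helper does:

import commands

def pin_to_node(tid, node):
    """Bind a thread/task id to all CPUs of the given host NUMA node."""
    path = "/sys/devices/system/node/node%d/cpulist" % node
    cpulist = open(path).read().strip()
    # e.g. cpulist == "4-7" for node 1 on a two-node host
    return commands.getoutput("taskset -pc %s %s" % (cpulist, tid))

if __name__ == "__main__":
    import os
    # pin this very process to node 0, just to demonstrate the call
    print pin_to_node(os.getpid(), 0)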
* [Autotest PATCH v2 4/4] virt: Introduce regression testing infrastructure 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong ` (2 preceding siblings ...) 2012-01-05 3:06 ` [Autotest PATCH v2 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong @ 2012-01-05 3:06 ` Amos Kong 2012-01-06 20:17 ` [Autotest PATCH v2 0/4] Network performance regression Lucas Meneghel Rodrigues 4 siblings, 0 replies; 14+ messages in thread From: Amos Kong @ 2012-01-05 3:06 UTC (permalink / raw) To: lmr, autotest; +Cc: kvm > regression.py: 'regression' module is used to compare the test results of two jobs, we can use it (regression.compare()) at the end of control file, This script can also be used directly. Example(tested in unclear env): | # python regression.py netperf /result1-dir /result2-dir \ | ../../tests/kvm/perf.conf > analyzer.py: It's used to compute average, standard deviation, augment rate, etc, and compare two test results (standard format). it can be called at the end of job (end of control file), we can also be used directly. | # python analyzer.py "result-v1-1.RHS result-v1-2.RHS" \ | "result-v2-1.RHS result-v2-2.RHS result-v2-3.RHS" log.txt | Thu Jan 5 10:17:24 2012 | | == Avg1 SD Augment Rate ========================== | TCP_STREAM | size|sessions|throughput| cpu|normalize| ... | 2048| 2| 14699.17| 31.73| 463.19| ... | %SD | 0.0| 0.6| 0.0| 0.8| ... | 2048| 4| 15935.68| 34.30| 464.66| ... | %SD | 0.0| 0.3| 1.7| 1.5| ... | ... | | == AvgS Augment Rate ============================= | TCP_STREAM | size|sessions|throughput| cpu|normalize| ... | 2048| 2| 7835.61| 31.66| 247.36| ... | 2048| 2| 8757.03| 31.94| 274.14| ... | % | +0.0| +11.8| +0.9| +10.8| ... | 2048| 4| 12000.65| 32.38| 370.62| ... | 2048| 4| 13641.20| 32.27| 423.29| ... | % | +0.0| +13.7| -0.3| +14.2| ... | > perf.conf: config test related parameters. It supports to compare current result with the result in autotest server. 
autotest result directory should be shared by NFS first, and specify its address in perf.conf Changes from v1: - refactor analysis code - add standard deviation percent Signed-off-by: Amos Kong <akong@redhat.com> --- client/tests/kvm/control | 7 ++ client/tests/kvm/perf.conf | 23 +++++ client/virt/tests/analyzer.py | 172 +++++++++++++++++++++++++++++++++++++++ client/virt/tests/regression.py | 34 ++++++++ 4 files changed, 236 insertions(+), 0 deletions(-) create mode 100644 client/tests/kvm/perf.conf create mode 100644 client/virt/tests/analyzer.py create mode 100644 client/virt/tests/regression.py diff --git a/client/tests/kvm/control b/client/tests/kvm/control index 950154c..5f4df87 100644 --- a/client/tests/kvm/control +++ b/client/tests/kvm/control @@ -67,3 +67,10 @@ if args: parser.parse_string(str) virt_utils.run_tests(parser, job) + +# compare the perfmance results of job +# from autotest_lib.client.virt.tests import regression +# regression.compare("ntttcp", "$olddir", +# "%s/results/default/" % os.environ['AUTODIR'], +# config_file="%s/tests/kvm/perf.conf" % os.environ['AUTODIR'], +# output_dir="%s/results/default/netperf-result.txt" % os.environ['AUTODIR']) diff --git a/client/tests/kvm/perf.conf b/client/tests/kvm/perf.conf new file mode 100644 index 0000000..31b72b2 --- /dev/null +++ b/client/tests/kvm/perf.conf @@ -0,0 +1,23 @@ +# this config file is used to set test related parameters +# + +[server] +result_nfs = kvm-autotest.englab.nay.redhat.com:/usr/local/autotest/results +result_mntdir = /results/ + +[ntttcp] +result_dir = results +result_file_pattern = .*.RHS + +[netperf] +result_dir = results +result_file_pattern = netperf-result.RHS + +[iozone] +result_dir = guest_test_results +result_file_pattern = + +[ffsb] +result_dir = results +result_file_pattern = + diff --git a/client/virt/tests/analyzer.py b/client/virt/tests/analyzer.py new file mode 100644 index 0000000..24b13a3 --- /dev/null +++ b/client/virt/tests/analyzer.py @@ -0,0 +1,172 @@ +import sys, re, string, time, commands, os, random + +def tee(content, filename): + """ Write content to standard output and file """ + fd = open(filename, "a") + fd.write(content + "\n") + fd.close() + print content + +class samples(): + def __init__(self, files): + self.files_dict = [] + for i in range(len(files)): + fd = open(files[i], "r") + self.files_dict.append(fd.readlines()) + fd.close() + + def getAvg(self): + return self._process(self.files_dict, self._get_list_avg) + + def getAvgPercent(self, avgs_dict): + return self._process(avgs_dict, self._get_augment_rate) + + def getSD(self): + return self._process(self.files_dict, self._get_list_sd) + + def getSDPercent(self, sds_dict): + return self._process(sds_dict, self._get_percent) + + def _get_percent(self, data): + """ num2 / num1 * 100 """ + result = "0.0" + if len(data) == 2 and float(data[0]) != 0: + result = "%.1f" % (float(data[1]) / float(data[0]) * 100) + return result + + def _get_augment_rate(self, data): + """ (num2 - num1) / num1 * 100 """ + result = "+0.0" + if len(data) == 2 and float(data[0]) != 0: + result = "%+.1f" % \ + (((float(data[1]) - float(data[0])) / float(data[0])) * 100) + return result + + def _get_list_sd(self, data): + """ + sumX = x1 + x2 + ... + xn + avgX = sumX / n + sumSquareX = x1^2 + ... 
+ xn^2 + SD = sqrt([sumSquareX - (n * (avgX ^ 2))] / (n - 1)) + """ + sum = sqsum = 0 + n = len(data) + for i in data: + sum += float(i) + sqsum += float(i) ** 2 + avg = sum / n + if avg == 0: + return "0.0" + return "%.1f" % (((sqsum - (n * avg**2)) / (n - 1))**0.5) + + def _get_list_avg(self, data): + """ Compute the average of list members """ + sum = 0 + for i in data: + sum += float(i) + if "." in data[0]: + return "%.2f" % (sum / len(data)) + return "%d" % (sum / len(data)) + + def _process_lines(self, files_dict, row, func): + """ Process lines of different sample files with assigned method """ + lines = [] + ret_lines = [] + + for i in range(len(files_dict)): + lines.append(files_dict[i][row].split("|")) + for col in range(len(lines[0])): + data_list = [] + for i in range(len(lines)): + data_list.append(lines[i][col].strip()) + ret_lines.append(func(data_list)) + return "|".join(ret_lines) + + def _process(self, files_dict, func): + """ Process dicts of sample files with assigned method """ + ret_lines = [] + if len(files_dict) == 1: + return files_dict[0], files_dict[0] + for i in range(len(files_dict[0])): + is_diff = False + for j in range(len(files_dict)): + if files_dict[0][i] != files_dict[j][i]: + is_diff = True + if is_diff: + line = self._process_lines(files_dict, i, func) + ret_lines.append(line) + else: + ret_lines.append(files_dict[0][i].strip()) + return ret_lines + + +def display(lists, rate, f, summary="Augment Rate", prefix="% ", ignore_col=1): + """ + Display lists data to standard format + + param lists: row data lists + param rate: argument rate list + param f: result output file + param summary: compare result summary + param prefix: output prefix in rate lines + param ignore_col: do not display some columns + """ + def format(list, str, ignore_col=0): + """ Format the string width of list member """ + str = str.split("|") + for l in range(len(list)): + line = list[l].split("|") + for col in range(len(line)): + line[col] = line[col].rjust(len(str[col]), ' ') + if not re.findall("[a-zA-Z]", line[col]) and col < ignore_col: + line[col] = " " * len(str[col]) + list[l] = "|".join(line) + return list + + for l in range(len(lists[0])): + if not re.findall("[a-zA-Z]", lists[0][l]): + break + tee("\n== %s " % summary + "="*(len(lists[0][l-1]) - len(summary) + 3) , f) + for n in range(len(lists)): + lists[n] = format(lists[n], lists[n][l-1]) + rate = format(rate, rate[l-1], ignore_col) + for i in range(len(lists[0])): + for n in range(len(lists)): + is_diff = False + for j in range(len(lists)): + if lists[0][i] != lists[j][i]: + is_diff = True + if is_diff or n==0: + tee(' ' * len(prefix) + lists[n][i], f) + if lists[0][i] != rate[i] and not re.findall("[a-zA-Z]", rate[i]): + tee(prefix + rate[i], f) + + +def analyze(sample_list1, sample_list2, log_file="./netperf-result.txt"): + """ Compute averages of two lists of files, compare and display results """ + + commands.getoutput("rm -f %s" % log_file) + tee(time.ctime(time.time()), log_file) + s1 = samples(sample_list1.split()) + avg1 = s1.getAvg() + sd1 = s1.getSD() + s2 = samples(sample_list2.split()) + avg2 = s2.getAvg() + sd2 = s2.getSD() + sd1 = s1.getSDPercent([avg1, sd1]) + sd2 = s1.getSDPercent([avg2, sd2]) + display([avg1], sd1, log_file, summary="Avg1 SD Augment Rate", + prefix="%SD ") + display([avg2], sd2, log_file, summary="Avg2 SD Augment Rate", + prefix="%SD ") + avgs_rate = s1.getAvgPercent([avg1, avg2]) + display([avg1, avg2], avgs_rate, log_file, summary="AvgS Augment Rate", + prefix="% ") + + +if 
__name__ == "__main__": + if len(sys.argv) < 3: + print 'Usage: python %s "$results list1" "$results list2" $log_file'\ + % sys.argv[0] + sys.exit(1) + analyze(sys.argv[1], sys.argv[2], sys.argv[3]) diff --git a/client/virt/tests/regression.py b/client/virt/tests/regression.py new file mode 100644 index 0000000..4809554 --- /dev/null +++ b/client/virt/tests/regression.py @@ -0,0 +1,34 @@ +import ConfigParser, sys, commands, os +import analyzer + +def compare(testname, olddir, curdir, config_file='perf.conf', output_dir=""): + config = ConfigParser.ConfigParser() + config.read(config_file) + + result_nfs = config.get("server", "result_nfs") + result_mntdir = config.get("server", "result_mntdir") + result_dir = config.get(testname, "result_dir") + result_file_pattern = config.get(testname, "result_file_pattern") + + def search_files(dir): + cmd = 'find %s|grep %s|grep "%s/%s"' % (dir, + testname, result_dir, result_file_pattern) + return commands.getoutput(cmd) + + if not os.path.isdir(result_mntdir): + os.mkdir(result_mntdir) + commands.getoutput("mount %s %s" % (result_nfs, result_mntdir)) + + if not os.path.isabs(olddir): + olddir = result_mntdir + olddir + oldlist = search_files(olddir) + newlist = search_files(curdir) + if oldlist != "" or newlist != "": + analyzer.analyze(oldlist, newlist, output_dir) + + +if __name__ == "__main__": + if len(sys.argv) != 5: + print 'Usage: python %s $testname $dir1 $dir2 $configfile' % sys.argv[0] + sys.exit(1) + compare(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) ^ permalink raw reply related [flat|nested] 14+ messages in thread
* Re: [Autotest PATCH v2 0/4] Network performance regression 2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong ` (3 preceding siblings ...) 2012-01-05 3:06 ` [Autotest PATCH v2 4/4] virt: Introduce regression testing infrastructure Amos Kong @ 2012-01-06 20:17 ` Lucas Meneghel Rodrigues 4 siblings, 0 replies; 14+ messages in thread From: Lucas Meneghel Rodrigues @ 2012-01-06 20:17 UTC (permalink / raw) To: Amos Kong; +Cc: autotest, kvm On 01/05/2012 01:05 AM, Amos Kong wrote: > This patchset adds a new network perf testcase for Windows, > refactors old netperf test, and support numa resource control. > Process the raw results to a 'standard format' at the end of test, > then we can analyze them with general module, compute average > and compare with old results. > User can configure test time/repeat times for getting stable results. > > Welcome to give feedback, thanks in advance! I've made a first review of the series, with comments on your pull request: https://github.com/autotest/autotest/pull/126 Let me know what you think about my findings. Cheers, Lucas > Changes from v1: > - refactor analysis module > - add new features in analysis code > - shape those two tests > - fix some script bugs > - add autoio script for ntttcp test > > --- > > Amos Kong (4): > virt-test: add NTttcp subtests > virt-test: Refactor netperf test and add analysis module > netperf: pin guest vcpus/memory/vhost thread to numa node > virt: Introduce regression testing infrastructure > > > client/tests/kvm/control | 7 + > client/tests/kvm/perf.conf | 23 +++ > client/virt/scripts/ntttcp.au3 | 41 +++++ > client/virt/subtests.cfg.sample | 59 ++++++- > client/virt/tests/analyzer.py | 172 ++++++++++++++++++++++ > client/virt/tests/netperf.py | 312 ++++++++++++++++++++++++++++----------- > client/virt/tests/ntttcp.py | 183 +++++++++++++++++++++++ > client/virt/tests/regression.py | 34 ++++ > 8 files changed, 733 insertions(+), 98 deletions(-) > create mode 100644 client/tests/kvm/perf.conf > create mode 100755 client/virt/scripts/ntttcp.au3 > create mode 100644 client/virt/tests/analyzer.py > create mode 100644 client/virt/tests/ntttcp.py > create mode 100644 client/virt/tests/regression.py > ^ permalink raw reply [flat|nested] 14+ messages in thread
end of thread, other threads:[~2012-01-06 20:16 UTC | newest]

Thread overview: 14+ messages:
2011-12-23 10:28 [RFC PATCH 0/4] Network performance regression Amos Kong
2011-12-23 10:28 ` [RFC PATCH 1/4] virt-test: add NTttcp subtests Amos Kong
2011-12-23 10:28 ` [RFC PATCH 2/4] virt-test: Refactor netperf test and add analysis module Amos Kong
2011-12-23 10:28 ` [RFC PATCH 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong
2011-12-23 10:28 ` [RFC PATCH 4/4] virt: Introduce regression testing infrastructure Amos Kong
2011-12-24 1:13 ` Yang Hamo Bai
2011-12-25 1:26 ` Amos Kong
2011-12-29 13:12 ` [RFC PATCH 0/4] Network performance regression Amos Kong
2012-01-05 3:05 ` [Autotest PATCH v2 " Amos Kong
2012-01-05 3:05 ` [Autotest PATCH v2 1/4] virt-test: add NTttcp subtests Amos Kong
2012-01-05 3:06 ` [Autotest PATCH v2 2/4] virt-test: Refactor netperf test and add analysis module Amos Kong
2012-01-05 3:06 ` [Autotest PATCH v2 3/4] netperf: pin guest vcpus/memory/vhost thread to numa node Amos Kong
2012-01-05 3:06 ` [Autotest PATCH v2 4/4] virt: Introduce regression testing infrastructure Amos Kong
2012-01-06 20:17 ` [Autotest PATCH v2 0/4] Network performance regression Lucas Meneghel Rodrigues