* [PATCH 1/3] virt: Add Transparent Hugepages setup v2
2011-06-16 0:18 [PATCH 0/3] Transparent Hugepages test v2 Lucas Meneghel Rodrigues
@ 2011-06-16 0:18 ` Lucas Meneghel Rodrigues
2011-06-16 15:56 ` Andrea Arcangeli
2011-06-16 0:18 ` [PATCH 2/3] KVM test: Add Transparent Hugepages subtests v2 Lucas Meneghel Rodrigues
2011-06-16 0:18 ` [PATCH 3/3] Add THP test variants to tests_base.cfg.sample v2 Lucas Meneghel Rodrigues
2 siblings, 1 reply; 7+ messages in thread
From: Lucas Meneghel Rodrigues @ 2011-06-16 0:18 UTC (permalink / raw)
To: autotest; +Cc: kvm
This class configures khugepaged to active mode, with
functions to restore original guest configuration.
Changes from v1:
* Rather than a pre/post script, config is now part of
the framework
* No need to store configuration in files anymore to restore
host khugepaged original behavior
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
client/virt/virt_test_setup.py | 143 +++++++++++++++++++++++++++++++++++++++-
1 files changed, 142 insertions(+), 1 deletions(-)
diff --git a/client/virt/virt_test_setup.py b/client/virt/virt_test_setup.py
index 3e1f5b5..792ffe6 100644
--- a/client/virt/virt_test_setup.py
+++ b/client/virt/virt_test_setup.py
@@ -1,11 +1,152 @@
"""
Library to perform pre/post test setup for KVM autotest.
"""
-import os, logging
+import os, logging, re, sre
from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils
class THPError(Exception):
    """
    Base exception for all errors raised during Transparent Hugepage setup.
    """
+
+
class THPNotSupportedError(THPError):
    """
    Thrown when the host does not support transparent hugepages.
    """
    pass
+
+
class THPWriteConfigError(THPError):
    """
    Thrown when a transparent hugepage configuration value cannot be
    written to the host.
    """
    pass
+
+
class THPKhugepagedError(THPError):
    """
    Raised when the khugepaged daemon is not behaving as expected.
    """
+
+
class TransparentHugePageConfig(object):
    def __init__(self, test, params):
        """
        Find paths for transparent hugepages and khugepaged configuration.
        Also, back up the original host configuration so it can be restored
        during cleanup.

        @param test: Test object (kept for API symmetry with other setup
                classes; not used directly here).
        @param params: Dictionary with test parameters. The optional key
                'test_config' may carry extra 'path:value' pairs separated
                by ';' that override the defaults below.
        @raise THPNotSupportedError: If no THP sysfs directory exists.
        """
        self.params = params

        RH_THP_PATH = "/sys/kernel/mm/redhat_transparent_hugepage"
        UPSTREAM_THP_PATH = "/sys/kernel/mm/transparent_hugepage"
        if os.path.isdir(RH_THP_PATH):
            self.thp_path = RH_THP_PATH
        elif os.path.isdir(UPSTREAM_THP_PATH):
            self.thp_path = UPSTREAM_THP_PATH
        else:
            raise THPNotSupportedError("System doesn't support transparent "
                                       "hugepages")

        # test_cfg holds all the desired host config values we want to set
        # before THP tests
        test_cfg = {"%s/defrag" % self.thp_path: "yes",
                    "%s/enabled" % self.thp_path: "always",
                    "%s/khugepaged/defrag" % self.thp_path: "yes",
                    "%s/khugepaged/scan_sleep_millisecs" % self.thp_path: "100",
                    "%s/khugepaged/pages_to_scan" % self.thp_path: "4096",
                    "%s/khugepaged/alloc_sleep_millisecs" % self.thp_path: "100",
                    "/sys/kernel/mm/ksm/run": "1",
                    "/proc/sys/vm/nr_hugepages": "0"}
        # Some knobs only exist on certain kernels, so probe before adding
        if os.path.isfile("%s/khugepaged/enabled" % self.thp_path):
            test_cfg["%s/khugepaged/enabled" % self.thp_path] = "always"
        if os.path.isfile("%s/khugepaged/max_ptes_none" % self.thp_path):
            test_cfg["%s/khugepaged/max_ptes_none" % self.thp_path] = "511"
            # NOTE(review): original patch indentation is ambiguous here;
            # this override is assumed to belong to the max_ptes_none branch
            test_cfg["%s/defrag" % self.thp_path] = "always"

        # Merge user supplied 'path:value' overrides (';' separated).
        # Bug fix: the original mixed re.split with the deprecated sre
        # module, and splitting on every ':' truncated values containing
        # colons; split only on the first ':' instead.
        test_config = self.params.get("test_config", None)
        if test_config is not None:
            for entry in re.split(";", test_config):
                if not entry:
                    continue
                key, value = entry.split(":", 1)
                test_cfg[key] = value

        self.original_config = {}
        # Save host current config, so we can restore it during cleanup
        for path in test_cfg:
            cfg_file = open(path, 'r')
            try:
                param = cfg_file.read()
            finally:
                cfg_file.close()
            # 'enabled'/'defrag' files look like 'always [never]'; keep only
            # the currently active (bracketed) value
            if ("enabled" in param) or ("defrag" in param):
                param = re.split(r"\[|\]", param)[1] + '\n'
            self.original_config[path] = param

        self.test_config = test_cfg


    def set_env(self):
        """
        Apply the test configuration on the host.
        """
        if self.test_config:
            for path in self.test_config.keys():
                cfg_file = open(path, 'w')
                try:
                    cfg_file.write(self.test_config[path])
                finally:
                    cfg_file.close()


    def set_params(self, path_dict=None, filename="", value=""):
        """
        Write value into every config file whose path contains filename.

        @param path_dict: Dict of files' paths {path: value}.
        @param filename: Substring selecting which paths to set up.
        @param value: Value written to the matching configuration files.
        @raise THPWriteConfigError: If a matching file cannot be written.
        """
        # Bug fix: avoid a shared mutable default argument
        if path_dict is None:
            path_dict = {}
        for path in path_dict.keys():
            # Bug fix: the original tested 'path in filename', which is
            # always False for a full sysfs path vs. a short name like
            # 'enabled'; the intended test is the substring the other way
            if filename in path:
                try:
                    cfg_file = open(path, "w")
                    try:
                        cfg_file.write(value)
                    finally:
                        cfg_file.close()
                except IOError as e:
                    raise THPWriteConfigError("Can not set %s to %s: %s" %
                                              (value, filename, e))


    def khugepaged_test(self):
        """
        Start, stop and frequency change test for khugepaged.

        Toggles every 'enabled' knob through never -> always -> never and
        checks that the khugepaged daemon is present after each change.

        @raise THPKhugepagedError: If khugepaged is not running after a
                status change.
        """
        status_list = ["never", "always", "never"]
        for status in status_list:
            self.set_params(self.test_config, "enabled", status)
            try:
                utils.run('pgrep khugepaged')
            except error.CmdError:
                raise THPKhugepagedError("khugepaged can not be set to "
                                         "status %s" % status)


    def setup(self):
        """
        Configure host for testing. Also, check that khugepaged is working as
        expected.
        """
        self.set_env()
        self.khugepaged_test()


    def cleanup(self):
        """
        Restore the host's original configuration after the test.
        """
        for path in self.original_config:
            p_file = open(path, 'w')
            try:
                p_file.write(self.original_config[path])
            finally:
                p_file.close()
+
+
class HugePageConfig(object):
def __init__(self, params):
"""
--
1.7.5.4
^ permalink raw reply related [flat|nested] 7+ messages in thread* [PATCH 2/3] KVM test: Add Transparent Hugepages subtests v2
2011-06-16 0:18 [PATCH 0/3] Transparent Hugepages test v2 Lucas Meneghel Rodrigues
2011-06-16 0:18 ` [PATCH 1/3] virt: Add Transparent Hugepages setup v2 Lucas Meneghel Rodrigues
@ 2011-06-16 0:18 ` Lucas Meneghel Rodrigues
2011-06-16 0:18 ` [PATCH 3/3] Add THP test variants to tests_base.cfg.sample v2 Lucas Meneghel Rodrigues
2 siblings, 0 replies; 7+ messages in thread
From: Lucas Meneghel Rodrigues @ 2011-06-16 0:18 UTC (permalink / raw)
To: autotest; +Cc: kvm
Transparent hugepage test includes:
1) Smoke test and stress test
The smoke test verifies that transparent hugepages are used by KVM and
the guest. The stress test uses parallel dd runs to exercise the
stability of transparent hugepages.
2) Swap test
Bootup a vm and verify that it can be swapped out and swapped in
correctly
3) Defrag test
Allocate hugepage for libhugetlbfs while defrag is on and off. Then
compare the results
Changes from v1:
* Different paths to mount debugfs and tmpfs on
* Use of autotest API to execute commands
* Use more current guest virt API to execute commands
Signed-off-by: Yiqiao Pu <ypu@redhat.com>
Signed-off-by: Lucas Meneghel Rodrigues <lmr@redhat.com>
---
client/tests/kvm/tests/trans_hugepage.py | 101 +++++++++++++++++++
client/tests/kvm/tests/trans_hugepage_defrag.py | 85 ++++++++++++++++
client/tests/kvm/tests/trans_hugepage_swapping.py | 109 +++++++++++++++++++++
3 files changed, 295 insertions(+), 0 deletions(-)
create mode 100644 client/tests/kvm/tests/trans_hugepage.py
create mode 100644 client/tests/kvm/tests/trans_hugepage_defrag.py
create mode 100644 client/tests/kvm/tests/trans_hugepage_swapping.py
diff --git a/client/tests/kvm/tests/trans_hugepage.py b/client/tests/kvm/tests/trans_hugepage.py
new file mode 100644
index 0000000..cbda16c
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage.py
@@ -0,0 +1,101 @@
+import logging, time, commands, os, string, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.common_lib import utils
+from autotest_lib.client.virt import virt_test_utils, aexpect
+
+
@error.context_aware
def run_trans_hugepage(test, params, env):
    """
    KVM kernel hugepages user side test:
    1) Smoke test
    2) Stress test

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def get_mem_status(key, location):
        """
        Read one field from /proc/meminfo on the host or the guest.

        @param key: meminfo field name (e.g. 'AnonHugePages').
        @param location: 'host' reads the host, anything else the guest.
        @return: Field value as a string (kB), or None if not found.
        """
        if location == "host":
            info = utils.system_output("cat /proc/meminfo")
        else:
            info = session.cmd("cat /proc/meminfo")
        for line in re.split("\n+", info):
            if line.startswith(key):
                return re.split(r'\s+', line)[1]


    # Check khugepage is used by guest
    dd_timeout = float(params.get("dd_timeout", 900))
    # Bug fix: params values are strings; convert once so the arithmetic
    # below ('mem / 4', comparisons) operates on integers
    mem = int(params['mem'])

    debugfs_flag = 1
    debugfs_path = os.path.join(test.tmpdir, 'debugfs')
    mem_path = os.path.join("/tmp", 'thp_space')

    error.context("smoke test setup")
    if not os.path.ismount(debugfs_path):
        if not os.path.isdir(debugfs_path):
            os.makedirs(debugfs_path)
        utils.run("mount -t debugfs none %s" % debugfs_path)

    logging.info("Smoke test start")
    error.context("smoke test")
    login_timeout = float(params.get("login_timeout", "3600"))
    vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
    session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)

    # Bug fix: meminfo values are strings; compare as integers (the
    # original string-vs-int comparison never triggered in python 2)
    nr_ah_before = int(get_mem_status('AnonHugePages', 'host'))
    if nr_ah_before <= 0:
        raise error.TestFail("VM is not using transparent hugepages")

    # Protect system from oom killer
    free_guest_mem = int(get_mem_status('MemFree', 'guest')) / 1024
    if free_guest_mem < mem:
        mem = free_guest_mem

    session.cmd("mkdir -p %s" % mem_path)

    session.cmd("mount -t tmpfs -o size=%sM none %s" % (str(mem), mem_path))

    count = mem / 4
    session.cmd("dd if=/dev/zero of=%s/1 bs=4000000 count=%s" %
                (mem_path, count), timeout=dd_timeout)

    nr_ah_after = int(get_mem_status('AnonHugePages', 'host'))

    if nr_ah_after <= nr_ah_before:
        logging.warning("VM did not use Transparent Hugepages during dd")

    if debugfs_flag == 1:
        if int(open('%s/kvm/largepages' % debugfs_path, 'r').read()) <= 0:
            raise error.TestFail("KVM did not use Transparent Hugepages")

    logging.info("Smoke test finished")

    # Use parallel dd as stress for memory
    count = count / 3
    logging.info("Stress test start")
    error.context("stress test")
    output = session.cmd("for i in `seq %s`; do dd if=/dev/zero of=%s/$i "
                         "bs=4000000 count=1& done" % (count, mem_path),
                         timeout=dd_timeout)

    if len(re.findall("No space", output)) > count * 0.05:
        raise error.TestFail("Too many dd instances failed in guest")

    try:
        output = session.cmd('pidof dd')
    except Exception:
        output = None

    if output is not None:
        # Kill any leftover dd processes
        # (bug fix: the original iterated over an undefined name 'o')
        for pid in re.split('\n+', output):
            session.cmd('kill -9 %s' % pid)

    session.cmd("umount %s" % mem_path)
    logging.info("Stress test finished")

    session.close()
diff --git a/client/tests/kvm/tests/trans_hugepage_defrag.py b/client/tests/kvm/tests/trans_hugepage_defrag.py
new file mode 100644
index 0000000..ddf8f7b
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_defrag.py
@@ -0,0 +1,85 @@
+import logging, time, commands, os, string, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_test_utils, virt_test_setup
+
+
@error.context_aware
def run_trans_hugepage_defrag(test, params, env):
    """
    KVM khugepaged defrag test:
    1) Fragment the host memory using many small files on tmpfs.
    2) Allocate hugepages for libhugetlbfs with khugepaged defrag off,
       then turn defrag on and allocate again.
    3) Fail unless turning defrag on yields more hugepages.

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def get_mem_status(key):
        """Return the /proc/meminfo value (string, kB) for the given field."""
        for line in open('/proc/meminfo', 'r').readlines():
            if line.startswith(key):
                return re.split(r'\s+', line)[1]


    def set_libhugetlbfs(number):
        """
        Request 'number' static hugepages and return how many the kernel
        actually granted (read back from the same file).
        """
        f = open("/proc/sys/vm/nr_hugepages", "w+")
        try:
            f.write(number)
            f.seek(0)
            ret = f.read()
        finally:
            f.close()
        return int(ret)


    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
    test_config.setup()
    try:
        # Test the defrag
        logging.info("Defrag test start")
        login_timeout = float(params.get("login_timeout", 360))
        vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
        session = virt_test_utils.wait_for_login(vm, timeout=login_timeout)
        mem_path = os.path.join("/tmp", "thp_space")

        error.context("Fragmenting guest memory")
        try:
            if not os.path.isdir(mem_path):
                os.makedirs(mem_path)
            if os.system("mount -t tmpfs none %s" % mem_path):
                raise error.TestError("Can not mount tmpfs")

            # Try to fragment the memory a bit
            cmd = ("for i in `seq 262144`; do dd if=/dev/urandom of=%s/$i "
                   "bs=4K count=1 & done" % mem_path)
            utils.run(cmd)
        finally:
            utils.run("umount %s" % mem_path)

        total = int(get_mem_status('MemTotal'))
        hugepagesize = int(get_mem_status('Hugepagesize'))
        nr_full = str(total / hugepagesize)

        error.context("activating khugepaged defrag functionality")
        # Allocate hugepages for libhugetlbfs before and after enabling
        # defrag, and check out the difference.
        nr_hp_before = set_libhugetlbfs(nr_full)
        try:
            defrag_path = os.path.join(test_config.thp_path, 'khugepaged',
                                       'defrag')
            defrag_file = open(str(defrag_path), 'w')
            try:
                defrag_file.write('yes')
            finally:
                defrag_file.close()
        except IOError as e:
            raise error.TestFail("Can not start defrag on khugepaged: %s" % e)
        # TODO: Is sitting an arbitrary amount of time appropriate? Aren't
        # there better ways to do this?
        time.sleep(1)
        nr_hp_after = set_libhugetlbfs(nr_full)

        if nr_hp_before >= nr_hp_after:
            raise error.TestFail("There was no memory defragmentation on "
                                 "host: %s huge pages allocated before "
                                 "turning khugepaged defrag on, %s allocated "
                                 "after it" % (nr_hp_before, nr_hp_after))

        session.close()
        logging.info("Defrag test succeeded")
    finally:
        # Bug fix: restore the original host THP configuration even when
        # the test fails (cleanup was previously skipped on error)
        test_config.cleanup()
diff --git a/client/tests/kvm/tests/trans_hugepage_swapping.py b/client/tests/kvm/tests/trans_hugepage_swapping.py
new file mode 100644
index 0000000..63f1560
--- /dev/null
+++ b/client/tests/kvm/tests/trans_hugepage_swapping.py
@@ -0,0 +1,109 @@
+import logging, time, commands, os, string, re
+from autotest_lib.client.common_lib import error
+from autotest_lib.client.bin import utils
+from autotest_lib.client.virt import virt_utils, virt_test_utils
+from autotest_lib.client.virt import virt_test_setup, virt_env_process
+
+
@error.context_aware
def run_trans_hugepage_swapping(test, params, env):
    """
    KVM khugepage user side test:
    1) Verify that the hugepages can be swapped in/out.

    @param test: KVM test object.
    @param params: Dictionary with test parameters.
    @param env: Dictionary with the test environment.
    """
    def get_args(args_list):
        """
        Look up the requested /proc/meminfo fields.

        @param args_list: Dict mapping result key -> meminfo field name.
        @return: Copy of the dict with values replaced by ints (kB).
        """
        args_list_tmp = args_list.copy()
        for line in open('/proc/meminfo', 'r').readlines():
            for key in args_list_tmp.keys():
                if line.startswith("%s" % args_list_tmp[key]):
                    args_list_tmp[key] = int(re.split(r'\s+', line)[1])
        return args_list_tmp

    test_config = virt_test_setup.TransparentHugePageConfig(test, params)
    test_config.setup()
    # Swapping test
    logging.info("Swapping test start")
    # Parameters of memory information
    # @total: Memory size
    # @free: Free memory size
    # @swap_size: Swap size
    # @swap_free: Free swap size
    # @hugepage_size: Page size of one hugepage
    args_dict_check = {"free": "MemFree", "swap_size": "SwapTotal",
                       "swap_free": "SwapFree", "total": "MemTotal",
                       "hugepage_size": "Hugepagesize"}
    args_dict = get_args(args_dict_check)
    swap_free = []
    total = int(args_dict['total']) / 1024
    free = int(args_dict['free']) / 1024
    swap_size = int(args_dict['swap_size']) / 1024
    swap_free.append(int(args_dict['swap_free']) / 1024)
    hugepage_size = int(args_dict['hugepage_size']) / 1024
    dd_timeout = float(params.get("dd_timeout", 900))
    login_timeout = float(params.get("login_timeout", 360))
    check_cmd_timeout = float(params.get("check_cmd_timeout", 900))
    mem_path = os.path.join(test.tmpdir, 'thp_space')

    # If swap is enough fill all memory with dd
    # (bug fix: the original compared the *list* swap_free against an int,
    # which is always True in python 2; compare the recorded value instead)
    if swap_free[0] > (total - free):
        count = total / hugepage_size
        tmpfs_size = total
    else:
        count = free / hugepage_size
        tmpfs_size = free

    if swap_size <= 0:
        # Bug fix: the original did 'raise logging.info(...)', which raises
        # a TypeError; skip the test properly instead
        raise error.TestNAError("Host does not have swap enabled")
    session = None
    try:
        if not os.path.isdir(mem_path):
            os.makedirs(mem_path)
        # Bug fix: the original referenced an undefined 'tmpfs_path' and
        # passed one argument to a two-placeholder format string
        utils.run("mount -t tmpfs -o size=%sM none %s" %
                  (tmpfs_size, mem_path))

        # Set the memory size of vm
        # To ignore the oom killer set it to the free swap size
        vm = virt_test_utils.get_living_vm(env, params.get("main_vm"))
        if int(params['mem']) > swap_free[0]:
            vm.destroy()
            vm_name = 'vmsw'
            vm0 = params.get("main_vm")
            vm0_key = virt_utils.env_get_vm(env, vm0)
            params['vms'] = params['vms'] + " " + vm_name
            params['mem'] = str(swap_free[0])
            vm_key = vm0_key.clone(vm0, params)
            virt_utils.env_register_vm(env, vm_name, vm_key)
            virt_env_process.preprocess_vm(test, params, env, vm_name)
            vm_key.create()
            session = virt_utils.wait_for(vm_key.remote_login,
                                          timeout=login_timeout)
        else:
            session = virt_test_utils.wait_for_login(vm,
                                                     timeout=login_timeout)

        error.context("making guest to swap memory")
        cmd = ("dd if=/dev/zero of=%s/zero bs=%s000000 count=%s" %
               (mem_path, hugepage_size, count))
        utils.run(cmd)

        args_dict = get_args(args_dict_check)
        swap_free.append(int(args_dict['swap_free']) / 1024)

        if swap_free[1] - swap_free[0] >= 0:
            raise error.TestFail("No data was swapped to memory")

        # Try harder to make guest memory to be swapped
        session.cmd("find / -name \"*\"", timeout=check_cmd_timeout)
    finally:
        # Bug fix: close the session and restore host config inside the
        # finally block; the original called session.close() unguarded
        # after the try, crashing when session was still None
        if session is not None:
            session.cmd("umount %s" % mem_path)
            session.close()
        test_config.cleanup()

    logging.info("Swapping test succeed")
--
1.7.5.4
^ permalink raw reply related [flat|nested] 7+ messages in thread