diff --git a/libvirt/tests/cfg/memory/memory_devices/virtio_mem_device_lifecycle.cfg b/libvirt/tests/cfg/memory/memory_devices/virtio_mem_device_lifecycle.cfg
new file mode 100644
index 0000000000..cd62f1a874
--- /dev/null
+++ b/libvirt/tests/cfg/memory/memory_devices/virtio_mem_device_lifecycle.cfg
@@ -0,0 +1,70 @@
+- memory.devices.virtio_mem.lifecycle:
+    type = virtio_mem_device_lifecycle
+    no s390-virtio
+    start_vm = no
+    state_file = "/tmp/%s.save"
+    numa_mem_val = 1048576
+    memory_val = 2097152
+    current_mem_val = 2097152
+    aarch64:
+        numa_mem_val = 4194304
+        memory_val = 8388608
+        current_mem_val = 8388608
+    vm_attrs = {'max_mem_rt': 15428800, 'max_mem_rt_unit': 'KiB','memory_unit':"KiB", 'memory':${memory_val}, 'current_mem':${current_mem_val}, 'current_mem_unit':'KiB', 'vcpu': 4,'cpu': {'numa_cell': [{'id': '0', 'cpus': '0-1', 'memory': '${numa_mem_val}', 'unit': 'KiB'},{'id':'1','cpus': '2-3','memory':'${numa_mem_val}','unit':'KiB'}]}}
+    init_alias_name = virtiomem0
+    plug_alias_name = virtiomem1
+    required_kernel = [5.14.0,)
+    guest_required_kernel = [5.8.0,)
+    func_supported_since_libvirt_ver = (8, 0, 0)
+    func_supported_since_qemu_kvm_ver = (6, 2, 0)
+    variants kernel_pagesize:
+        - 4k:
+            only x86_64, aarch64
+            page_size = 4
+            default_hp_size = 2048
+        - 64k:
+            only aarch64
+            page_size = 64
+            default_hp_size = 524288
+    variants memory_source:
+        - no_source:
+            source_dict = {}
+            source_xpath = []
+        - nodemask:
+            nodeset_num = 1
+            source_dict = {'nodemask':'%s'}
+            source_xpath = [{'element_attrs':[".//source/nodemask"],'text':'%s'}]
+        - pagesize:
+            source_dict = {'pagesize':${page_size}, 'pagesize_unit':'KiB'}
+            source_xpath = [{'element_attrs':[".//source/pagesize[@unit='KiB']"],'text':'${page_size}'}]
+        - nodemask_pagesize:
+            nodeset_num = 2
+            use_huge_page = "yes"
+            source_dict = {'nodemask':'%s', 'pagesize':${default_hp_size}, 'pagesize_unit':'KiB'}
+            source_xpath = [{'element_attrs':[".//source/nodemask"],'text':'%s'}, {'element_attrs':[".//source/pagesize[@unit='KiB']"],'text':'${default_hp_size}'}]
+    variants memory_target:
+        - all_requested_and_address:
+            init_size = 524288
+            init_requested = ${init_size}
+            init_address = '0x240000000'
+            plug_size = 1048576
+            plug_requested = ${plug_size}
+            plug_address = '0x300000000'
+            init_target_dict = {'size':${init_size}, 'size_unit':'KiB', 'node':0, 'requested_size':${init_requested}, 'requested_unit':'KiB', 'block_size':${default_hp_size}, 'address':{'attrs':{'base':'${init_address}'}}}
+            init_target_xpath = [{'element_attrs':[".//target/size[@unit='KiB']"],'text':'${init_size}'}, {'element_attrs':[".//target/requested[@unit='KiB']"],'text':'${init_requested}'}, {'element_attrs':[".//target/block[@unit='KiB']"],'text':'${default_hp_size}'}, {'element_attrs':[".//target/current[@unit='KiB']"],'text':'${init_requested}'}, {'element_attrs':[".//target/node"],'text':'0'}, {'element_attrs':[".//target/address[@base='${init_address}']"]}]
+            plug_target_dict = {'size':${plug_size}, 'size_unit':'KiB', 'node':1, 'requested_size':${plug_requested}, 'requested_unit':'KiB', 'block_size':${default_hp_size}, 'address':{'attrs':{'base':'${plug_address}'}}}
+            plug_target_xpath = [{'element_attrs':[".//target/size[@unit='KiB']"],'text':'${plug_size}'}, {'element_attrs':[".//target/requested[@unit='KiB']"],'text':'${plug_requested}'}, {'element_attrs':[".//target/block[@unit='KiB']"],'text':'${default_hp_size}'}, {'element_attrs':[".//target/current[@unit='KiB']"],'text':'${plug_requested}'}, {'element_attrs':[".//target/node"],'text':'1'}, {'element_attrs':[".//target/address[@base='${plug_address}']"]}]
+        - part_requested_and_no_address:
+            init_size = 1048576
+            init_requested = 524288
+            plug_size = 2097152
+            plug_requested = 1048576
+            init_target_dict = {'size':${init_size}, 'size_unit':'KiB', 'node':0, 'requested_size':${init_requested}, 'requested_unit':'KiB', 'block_size':${default_hp_size}}
+            init_target_xpath = [{'element_attrs':[".//target/size[@unit='KiB']"],'text':'${init_size}'}, {'element_attrs':[".//target/requested[@unit='KiB']"],'text':'${init_requested}'}, {'element_attrs':[".//target/block[@unit='KiB']"],'text':'${default_hp_size}'}, {'element_attrs':[".//target/current[@unit='KiB']"],'text':'${init_requested}'}, {'element_attrs':[".//target/node"],'text':'0'}, {'element_attrs':[".//target/address"]}]
+            plug_target_dict = {'size':${plug_size}, 'size_unit':'KiB', 'node':1, 'requested_size':${plug_requested}, 'requested_unit':'KiB', 'block_size':${default_hp_size}}
+            plug_target_xpath = [{'element_attrs':[".//target/size[@unit='KiB']"],'text':'${plug_size}'}, {'element_attrs':[".//target/requested[@unit='KiB']"],'text':'${plug_requested}'}, {'element_attrs':[".//target/block[@unit='KiB']"],'text':'${default_hp_size}'}, {'element_attrs':[".//target/current[@unit='KiB']"],'text':'${plug_requested}'}, {'element_attrs':[".//target/node"],'text':'1'}, {'element_attrs':[".//target/address"]}]
+    init_mem_device_dict = {'mem_model':'virtio-mem', 'source':${source_dict}, 'target':${init_target_dict}}
+    init_xpath_list = [${source_xpath}, ${init_target_xpath}]
+    plug_mem_device_dict = {'mem_model':'virtio-mem', 'source':${source_dict}, 'target':${plug_target_dict}}
+    plug_xpath_list = [${source_xpath}, ${plug_target_xpath}]
+
diff --git a/libvirt/tests/src/memory/memory_devices/virtio_mem_device_lifecycle.py b/libvirt/tests/src/memory/memory_devices/virtio_mem_device_lifecycle.py
new file mode 100644
index 0000000000..388fac4d96
--- /dev/null
+++ b/libvirt/tests/src/memory/memory_devices/virtio_mem_device_lifecycle.py
@@ -0,0 +1,221 @@
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+#
+# Copyright Red Hat
+#
+# SPDX-License-Identifier: GPL-2.0
+#
+# Author: Liang Cong
+#
+# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+import os
+
+from virttest import utils_libvirtd
+from virttest import virsh
+
+from virttest.libvirt_xml import vm_xml
+from virttest.libvirt_xml.devices import memory
+from virttest import test_setup
+from virttest import utils_misc
+from virttest.utils_libvirt import libvirt_memory
+from virttest.utils_libvirt import libvirt_vmxml
+
+from provider.memory import memory_base
+from provider.numa import numa_base
+
+
+def run(test, params, env):
+    """
+    Verify various config of virtio-mem memory device settings take effect
+    during the life cycle of guest vm.
+    """
+
+    def clean_empty_memory_device_config(mem_device_dict):
+        """
+        Clean empty config of the memory device
+
+        :param mem_device_dict (dict): memory device config dictionary
+        """
+        for key in list(mem_device_dict.keys()):
+            if not mem_device_dict[key]:
+                del mem_device_dict[key]
+
+    def check_virtio_mem_device_xml(xpath_dict):
+        """
+        Check the virtio-mem memory device config by xpath
+
+        :param xpath_dict (dict): xpath dict to check if the memory config is correct,
+            like {"alias_name": [xpath1, xpath2], ...}
+        """
+        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
+        memory_devices = vmxml.devices.by_device_tag('memory')
+        target_memory_num = 0
+        for alias_name, xpath_list in xpath_dict.items():
+            for memory_device in memory_devices:
+                if alias_name == memory_device.alias.get('name'):
+                    target_memory_num = target_memory_num + 1
+                    for xpath in xpath_list:
+                        libvirt_vmxml.check_guest_xml_by_xpaths(memory_device, xpath)
+        if target_memory_num != len(xpath_dict):
+            test.fail('Expected %d virtio-mem mem devices with required alias name, but found %d'
+                      % (len(xpath_dict), target_memory_num))
+
+    def check_current_mem_size(alias_name, expect_current_size):
+        """
+        Check if virtio-mem memory with alias_name has expected current memory size
+
+        :param alias_name (str): alias name of the virtio-mem device
+        :param expect_current_size (int): expected current memory size of the virtio-mem device
+
+        :return: bool, true if virtio-mem memory with alias_name has expected current memory size
+        """
+        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
+        memory_devices = vmxml.devices.by_device_tag('memory')
+        for memory_device in memory_devices:
+            if alias_name == memory_device.alias.get('name') and memory_device.target.current_size == expect_current_size:
+                return True
+        return False
+
+    def check_case_availability():
+        """
+        Check whether the case is available
+        """
+        memory_base.check_mem_page_sizes(test, page_size, default_hp_size)
+        memory_base.check_supported_version(params, test, vm)
+
+    def setup_test():
+        """
+        Setup for the case:
+        1. Get host available numa nodes
+        2. Change parameters according to available numa nodes
+        3. Allocate huge page memory for target host node if needs
+        """
+        if nodeset_num:
+            numatest_obj = numa_base.NumaTest(vm, params, test)
+            if 1 == nodeset_num:
+                min_memory_size = init_size + plug_size
+            elif 2 == nodeset_num:
+                min_memory_size = init_size if init_size >= plug_size else plug_size
+            numatest_obj.check_numa_nodes_availability(nodeset_num, min_memory_size)
+            numa_list = numatest_obj.get_available_numa_nodes(min_memory_size)[:nodeset_num]
+            nodeset_str = numa_base.convert_to_string_with_dash(
+                ','.join([str(node) for node in numa_list]))
+            source_dict = params.get("source_dict")
+            source_xpath = params.get("source_xpath")
+            source_dict = eval(source_dict % nodeset_str)
+            source_xpath = eval(source_xpath % nodeset_str)
+            init_mem_device_dict["source"] = source_dict
+            plug_mem_device_dict["source"] = source_dict
+            init_xpath_list[0] = source_xpath
+            plug_xpath_list[0] = source_xpath
+            test.log.debug("Selected numa nodeset is:%s", nodeset_str)
+
+            if use_huge_page:
+                params["target_nodes"] = " ".join([str(node) for node in numa_list])
+                # Use floor division: the hugepage count written to sysfs must
+                # be an integer, not a float like '768.0'
+                params["target_hugepages"] = (init_size + plug_size) // default_hp_size
+                hpc = test_setup.HugePageConfig(params)
+                hpc.setup()
+
+        if vm.is_alive():
+            vm.destroy()
+
+    def run_test():
+        """
+        Test steps
+        """
+        test.log.info("TEST_STEP1: Define the guest")
+        clean_empty_memory_device_config(init_mem_device_dict)
+        memory_base.define_guest_with_memory_device(params, init_mem_device_dict, vm_attrs)
+
+        test.log.info("TEST_STEP2: Start the guest")
+        vm.start()
+        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
+        test.log.debug("Guest config xml after start is:\n%s", vmxml)
+
+        test.log.info("TEST_STEP3: Wait for the guest os is boot up,"
+                      "Check virtio-mem memory device config by virsh dumpxml")
+        vm.wait_for_login().close()
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list})
+
+        test.log.info("TEST_STEP4: Hotplug a virtio-mem memory device")
+        clean_empty_memory_device_config(plug_mem_device_dict)
+        mem_device = memory.Memory()
+        mem_device.setup_attrs(**plug_mem_device_dict)
+        virsh.attach_device(vm_name, mem_device.xml, debug=True, ignore_status=False)
+        if not utils_misc.wait_for(
+                lambda: check_current_mem_size(plug_alias_name, plug_requested), 20):
+            test.fail('Hot-plugged virtio-mem mem devices with alias name %s should have '
+                      'current memory size %d' % (plug_alias_name, plug_requested))
+
+        test.log.info("TEST_STEP5: Consume the guest memory")
+        session = vm.wait_for_login()
+        libvirt_memory.consume_vm_freememory(session)
+        # Close the session to avoid leaking the guest login connection
+        session.close()
+
+        test.log.info(
+            "TEST_STEP6: Check virtio-mem memory device config by virsh dumpxml")
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+        test.log.info("TEST_STEP7: Life cycle test")
+        virsh.suspend(vm_name, ignore_status=False, debug=True)
+        virsh.resume(vm_name, ignore_status=False, debug=True)
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+        virsh.save(vm_name, state_file, ignore_status=False, debug=True)
+        virsh.restore(state_file, ignore_status=False, debug=True)
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+        virsh.managedsave(vm_name, ignore_status=False, debug=True)
+        vm.start()
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+        vm.reboot()
+        vm.wait_for_login().close()
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+        utils_libvirtd.libvirtd_restart()
+        check_virtio_mem_device_xml({init_alias_name: init_xpath_list, plug_alias_name: plug_xpath_list})
+
+    def teardown_test():
+        """
+        1. Restore guest config xml
+        2. Clean huge page memory
+        3. Remove state file
+        """
+        bkxml.sync()
+        if use_huge_page:
+            hpc = test_setup.HugePageConfig(params)
+            hpc.cleanup()
+        if os.path.exists(state_file):
+            os.remove(state_file)
+
+    vm_name = params.get("main_vm")
+    vm = env.get_vm(vm_name)
+    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
+    bkxml = vmxml.copy()
+
+    vm_attrs = eval(params.get("vm_attrs", "{}"))
+    init_mem_device_dict = eval(
+        params.get("init_mem_device_dict", "{}"))
+    plug_mem_device_dict = eval(
+        params.get("plug_mem_device_dict", "{}"))
+    init_xpath_list = eval(params.get("init_xpath_list"))
+    plug_xpath_list = eval(params.get("plug_xpath_list"))
+    init_alias_name = params.get("init_alias_name")
+    plug_alias_name = params.get("plug_alias_name")
+    plug_requested = int(params.get("plug_requested"))
+    page_size = int(params.get("page_size"))
+    default_hp_size = int(params.get("default_hp_size"))
+    init_size = int(params.get("init_size"))
+    plug_size = int(params.get("plug_size"))
+    nodeset_num = int(params.get("nodeset_num", "0"))
+    use_huge_page = "yes" == params.get("use_huge_page")
+    state_file = params.get('state_file', '/tmp/%s.save') % vm_name
+
+    try:
+        check_case_availability()
+        setup_test()
+        run_test()
+
+    finally:
+        teardown_test()