From 450f2047d6eae08322aaccf37a60d12d298e1523 Mon Sep 17 00:00:00 2001 From: nanli Date: Sun, 19 May 2024 21:14:26 +0800 Subject: [PATCH] add case for dimm memory hotplug xxxx-300402 Dimm memory device hot-plug Signed-off-by: nanli --- .../memory_devices/dimm_memory_hotplug.cfg | 68 +++++ .../memory_devices/dimm_memory_hotplug.py | 245 ++++++++++++++++++ provider/memory/memory_base.py | 78 +++++- 3 files changed, 390 insertions(+), 1 deletion(-) create mode 100644 libvirt/tests/cfg/memory/memory_devices/dimm_memory_hotplug.cfg create mode 100644 libvirt/tests/src/memory/memory_devices/dimm_memory_hotplug.py diff --git a/libvirt/tests/cfg/memory/memory_devices/dimm_memory_hotplug.cfg b/libvirt/tests/cfg/memory/memory_devices/dimm_memory_hotplug.cfg new file mode 100644 index 0000000000..e6691180e6 --- /dev/null +++ b/libvirt/tests/cfg/memory/memory_devices/dimm_memory_hotplug.cfg @@ -0,0 +1,68 @@ +- memory.devices.dimm.hotplug: + no s390-virtio + type = dimm_memory_hotplug + start_vm = yes + mem_model = 'dimm' + allocate_size = "1572864" + allocate_memory = "${allocate_size}KiB" + target_size = "524288" + size_unit = 'KiB' + slot = '0' + node = 0 + plug_node = 0 + plug_slot = 1 + mem_value = 2097152 + current_mem = 2097152 + numa_mem = 1048576 + max_mem = 4194304 + max_mem_slots = 16 + plug_event = "device-added" + audit_cmd = "ausearch --start today -m VIRT_RESOURCE | grep 'mem'" + ausearch_check = 'old-mem=%d new-mem=%d' + expected_log = "ACPI_DEVICE_OST|device_add" + kernel_hp_file = '/sys/devices/system/node/node0/hugepages/hugepages-%skB/nr_hugepages' + max_dict = '"max_mem_rt": ${max_mem}, "max_mem_rt_slots": ${max_mem_slots}, "max_mem_rt_unit": "KiB"' + numa_dict = "'vcpu': 4,'cpu':{'numa_cell': [{'id': '0', 'cpus': '0-1', 'memory': '${numa_mem}'}, {'id': '1', 'cpus': '2-3', 'memory': '${numa_mem}'}]}" + vm_attrs = {${numa_dict},${max_dict},'memory_unit':'KiB','memory':${mem_value},'current_mem':${current_mem},'current_mem_unit':'KiB'} + variants 
plug_dimm_type: + - target_and_address: + plug_node = 1 + plug_target_size = '1048576' + addr_dict = {'attrs': {'type':'dimm','slot':'${slot}'}} + plug_addr = {'attrs': {'type':'dimm','slot':'${plug_slot}'}} + plug_size_unit = "${size_unit}" + - source_and_mib: + target_size = "1024" + size_unit = 'MiB' + node = 1 + plug_node = 1 + plug_target_size = "512" + plug_size_unit = "MiB" + source_dict = {'nodemask': '0','pagesize': %d, 'pagesize_unit':'KiB'} + - plug_exceeded_max_mem: + plug_size_unit = "G" + plug_target_size = "2" + plug_error = "exceed domain's maxMemory config size '${max_mem}'" + - duplicate_addr: + plug_size_unit = "${size_unit}" + plug_target_size = "${target_size}" + base = "0x100000000" + addr_dict = {'attrs': {'type':'dimm','base': '${base}', 'slot':'${slot}'}} + plug_addr = {'attrs': {'type':'dimm','base': '${base}', 'slot':'${plug_slot}'}} + plug_error = "same address|overlaps" + - duplicate_slot: + base = "0x100000000" + plug_size_unit = "${size_unit}" + plug_slot = 0 + plug_target_size = "${target_size}" + addr_dict = {'attrs': {'type':'dimm','slot':'${slot}'}} + plug_addr = {'attrs': {'type':'dimm','base': '${base}', 'slot':'${plug_slot}'}} + plug_error = "memory device slot '0' is already being used by another memory device" + - zero_memory_unit_gb: + plug_size_unit = "G" + plug_target_size = "0" + plug_error = "property 'size' of memory-backend-ram doesn't take value '0'" + dimm_dict = {'mem_model':'${mem_model}','target': {'size':${target_size}, 'size_unit':'${size_unit}', 'node':${node}}} + plug_dimm_dict = {'mem_model':'${mem_model}','target': {'size':${plug_target_size}, 'size_unit':'${plug_size_unit}', 'node':${plug_node}}} + base_xpath = [{'element_attrs':[".//memory[@unit='KiB']"],'text':'%d'},{'element_attrs':[".//currentMemory[@unit='KiB']"],'text':'%d'}] + dimm_xpath = [{'element_attrs':[".//target/size[@unit='KiB']"],'text':'%s'},{'element_attrs':[".//address[@slot='%s']"]}] diff --git 
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Copyright Redhat
#
# SPDX-License-Identifier: GPL-2.0
#
# Author: Nannan Li
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import os

from avocado.utils import memory
from avocado.utils import process

from virttest import utils_misc
from virttest import virsh
from virttest.libvirt_xml import vm_xml
from virttest.staging import utils_memory
from virttest.utils_libvirt import libvirt_misc
from virttest.utils_libvirt import libvirt_vmxml
from virttest.utils_test import libvirt

from provider.memory import memory_base

virsh_dargs = {"ignore_status": False, "debug": True}


def adjust_dimm_dict(params):
    """
    Adjust dimm memory dict and plugging dimm dict.

    Expands the ``source_dict`` / ``addr_dict`` / ``plug_addr`` config
    strings (if present) into the dimm device dicts, filling in the host's
    default hugepage size, and stores the results back into ``params``.

    :param params: dictionary with the test parameters.
    """
    source_dict = params.get("source_dict", "")
    addr_dict = params.get("addr_dict", "")
    plug_addr = params.get("plug_addr", "")
    # NOTE: eval() here consumes trusted cartesian test config, not
    # external input — standard practice in tp-libvirt test code.
    dimm_dict = eval(params.get("dimm_dict", "{}"))
    plug_dimm_dict = eval(params.get("plug_dimm_dict", "{}"))
    default_hugepage_size = memory.get_huge_page_size()

    if source_dict:
        dimm_dict['source'] = eval(source_dict % default_hugepage_size)
        plug_dimm_dict['source'] = eval(source_dict % default_hugepage_size)
    if addr_dict:
        dimm_dict['address'] = eval(addr_dict)
        plug_dimm_dict['address'] = eval(plug_addr)
    params.update({'dimm_dict': dimm_dict})
    params.update({'plug_dimm_dict': plug_dimm_dict})


def adjust_size_unit(params, plugged=False):
    """
    Adjust the unit of dimm target size and plugged dimm target size.

    Sizes configured in MiB/G are normalized to KiB so callers can do
    arithmetic against the KiB-based memory values in the domain XML.

    :param params: cartesian config parameters.
    :param plugged: the flag of after plugging.
    :return: tuple (target_size, plugged_size) in KiB — the initially
        defined dimm target size and the hot-plugged dimm target size
        (0 when ``plugged`` is False).
    """
    plug_dimm_type = params.get("plug_dimm_type")
    if plug_dimm_type in ["source_and_mib", "zero_memory_unit_gb"]:
        target_size = memory_base.convert_data_size(
            params.get("target_size") + params.get('size_unit'), 'KiB')
    else:
        target_size = params.get("target_size")

    if plugged:
        if plug_dimm_type in ["source_and_mib", "zero_memory_unit_gb"]:
            plugged_size = memory_base.convert_data_size(
                params.get("plug_target_size") + params.get('plug_size_unit'), 'KiB')
        else:
            plugged_size = params.get('plug_target_size')
    else:
        plugged_size = 0
    return int(target_size), int(plugged_size)


def attach_dimm_and_check_result(test, params):
    """
    Attach dimm memory and check result.

    :param test: test object.
    :param params: dictionary with the test parameters.
    """
    plug_error = params.get('plug_error')
    plug_event = params.get('plug_event')
    plug_dict = params.get("plug_dimm_dict")
    memory_base.plug_memory_and_check_result(
        test, params, mem_dict=plug_dict, operation='attach',
        expected_error=plug_error, expected_event=plug_event)


def check_guest_virsh_dominfo(vm, test, params, plugged=False):
    """
    Check current memory value and memory value in virsh dominfo result.

    :param vm: vm object.
    :param test: test object.
    :param params: dictionary with the test parameters.
    :param plugged: the flag of checking after plugging dimm.
    """
    mem_value = int(params.get("mem_value"))
    current_mem = int(params.get("current_mem"))
    target_size, plugged_size = adjust_size_unit(params, plugged=plugged)
    # Max memory grows by every defined dimm; Used memory only by the
    # hot-plugged one.
    expected_mem = str(mem_value + target_size + plugged_size)
    expected_curr = str(current_mem + plugged_size)

    memory_base.check_dominfo(vm, test, expected_mem, expected_curr)


def check_after_attach(vm, test, params):
    """
    Check the below points after plugging:
    1. Check the audit log by ausearch.
    2. Check the libvirtd log.
    3. Check the memory allocation and memory device config.
    4. Check the memory info by virsh dominfo.
    5. Check the guest memory.

    :param vm: vm object.
    :param test: test object.
    :param params: dictionary with the test parameters.
    """
    mem_value = int(params.get("mem_value"))
    current_mem = int(params.get("current_mem"))
    expected_log = params.get("expected_log")
    audit_cmd = params.get("audit_cmd")
    plug_slot = params.get("plug_slot")
    libvirtd_log_file = os.path.join(test.debugdir, "libvirtd.log")
    target_size, plugged_size = adjust_size_unit(params, plugged=True)
    base_xpath, dimm_xpath = params.get("base_xpath"), params.get("dimm_xpath")
    ausearch_check = params.get("ausearch_check") % (
        mem_value+target_size, mem_value+target_size+plugged_size)

    # Check the audit log by ausearch.
    ausearch_result = process.run(audit_cmd, shell=True)
    libvirt.check_result(ausearch_result, expected_match=ausearch_check)
    test.log.debug("Check audit log %s successfully.", ausearch_check)

    # Check the libvirtd log.
    result = utils_misc.wait_for(
        lambda: libvirt.check_logfile(expected_log, libvirtd_log_file), timeout=20)
    if not result:
        test.fail("Can't get expected log %s in %s" % (expected_log, libvirtd_log_file))

    # Check the memory allocation and memory device config.
    vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name)
    libvirt_vmxml.check_guest_xml_by_xpaths(
        vmxml, eval(base_xpath % (mem_value + target_size + plugged_size,
                                  current_mem + plugged_size)))
    # The hot-plugged dimm is the second memory device in the live XML.
    libvirt_vmxml.check_guest_xml_by_xpaths(
        vmxml.devices.by_device_tag("memory")[1], eval(dimm_xpath % (plugged_size, plug_slot)))

    # Check the memory info by virsh dominfo.
    check_guest_virsh_dominfo(vm, test, params, plugged=True)

    # Check the guest memory.
    session = vm.wait_for_login()
    new_memtotal = utils_memory.memtotal(session)
    session.close()
    expected_memtotal = params.get('old_memtotal') + plugged_size
    if new_memtotal != expected_memtotal:
        test.fail("Memtotal is %s, should be %s " % (new_memtotal, expected_memtotal))
    test.log.debug("Check Memtotal successfully.")


def run(test, params, env):
    """
    Verify dimm memory device hot-plug with different configs.
    """
    def setup_test():
        """
        Allocate hugepage memory on the host.
        """
        # Integer division: nr_hugepages must be a whole number of pages
        # (the original true division only worked because %d truncates).
        process.run("echo %d > %s" % (
            allocate_size // default_hugepage_size,
            kernel_hp_file % default_hugepage_size), shell=True)

    def run_test():
        """
        1. Define vm with dimm memory device.
        2. Hotplug dimm memory.
        3. Check audit log, libvirtd log, memory allocation and memory device
        config.
        """
        test.log.info("TEST_STEP1: Define vm with dimm memory")
        memory_base.define_guest_with_memory_device(params, params.get("dimm_dict"), vm_attrs)

        test.log.info("TEST_STEP2: Start guest")
        vm.start()
        session = vm.wait_for_login()

        test.log.info("TEST_STEP3: Get the guest memory")
        params.update({'old_memtotal': utils_memory.memtotal(session)})
        session.close()

        test.log.info("TEST_STEP4: Check the memory allocation and dimm config")
        target_size, _ = adjust_size_unit(params)
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        libvirt_vmxml.check_guest_xml_by_xpaths(
            vmxml, eval(base_xpath % (mem_value + target_size, current_mem)))
        libvirt_vmxml.check_guest_xml_by_xpaths(
            vmxml.devices.by_device_tag("memory")[0], eval(dimm_xpath % (target_size, slot)))

        test.log.info("TEST_STEP5: Check the memory info by virsh dominfo")
        check_guest_virsh_dominfo(vm, test, params)

        test.log.info("TEST_STEP6: Hot plug one dimm memory device")
        attach_dimm_and_check_result(test, params)

        # Negative scenarios (plug_error set) stop at the attach failure;
        # only the successful plug variants verify the post-plug state.
        if plug_dimm_type in ["target_and_address", "source_and_mib"]:
            test.log.info("TEST_STEP7: Check audit and libvirt log, memory "
                          "allocation and memory device config ")
            check_after_attach(vm, test, params)

    def teardown_test():
        """
        Clean data.
        """
        test.log.info("TEST_TEARDOWN: Clean up env.")
        bkxml.sync()
        process.run("echo 0 > %s" % (kernel_hp_file % default_hugepage_size), shell=True)

    vm_name = params.get("main_vm")
    original_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    bkxml = original_xml.copy()
    vm = env.get_vm(vm_name)
    adjust_dimm_dict(params)

    slot = params.get("slot")
    kernel_hp_file = params.get("kernel_hp_file")
    vm_attrs = eval(params.get("vm_attrs", "{}"))
    allocate_size = int(params.get("allocate_size"))
    mem_value = int(params.get("mem_value"))
    current_mem = int(params.get("current_mem"))
    base_xpath, dimm_xpath = params.get("base_xpath"), params.get("dimm_xpath")
    plug_dimm_type = params.get("plug_dimm_type")
    default_hugepage_size = memory.get_huge_page_size()

    try:
        setup_test()
        run_test()

    finally:
        teardown_test()


# ----------------------------------------------------------------------
# Additions to provider/memory/memory_base.py (second file in this patch).
# There they rely on memory_base's own imports: virsh, vm_xml, libvirt,
# libvirt_vmxml, libvirt_misc and a module-level virsh_dargs constant.
# ----------------------------------------------------------------------


def define_guest_with_memory_device(params, mem_attr_list, vm_attrs=None):
    """
    Define guest with specified memory device(s).

    :param params: a dict for parameters, must contain "main_vm".
    :param mem_attr_list: a single memory device attrs dict, or a list
        of such dicts.
    :param vm_attrs: basic vm attributes to define, applied before the
        memory devices are added.
    """
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(params.get("main_vm"))
    if vm_attrs:
        vmxml.setup_attrs(**vm_attrs)

    # Accept a single device dict for caller convenience.
    if not isinstance(mem_attr_list, list):
        mem_attr_list = [mem_attr_list]
    for mem in mem_attr_list:
        memory_object = libvirt_vmxml.create_vm_device_by_type('memory', mem)
        # NOTE(review): relies on VMXMLDevices.append returning the updated
        # device list (avocado-vt behavior) — confirm, plain list.append
        # would return None here.
        vmxml.devices = vmxml.devices.append(memory_object)
    vmxml.sync()


def plug_memory_and_check_result(test, params, mem_dict, operation='attach',
                                 expected_error='', expected_event='', **kwargs):
    """
    Hot plug or hot unplug memory and check event.

    :param test: test object.
    :param params: dictionary with the test parameters.
    :param mem_dict: the memory dict to plug.
    :param operation: the operation, 'attach' or 'detach'.
    :param expected_error: expected error after plug or unplug.
    :param expected_event: expected event for plug or unplug.
    :raises ValueError: if ``operation`` is neither 'attach' nor 'detach'.
    """
    vm_name = params.get('main_vm')
    plug_dimm = libvirt_vmxml.create_vm_device_by_type('memory', mem_dict)

    # Only wait for a lifecycle event when the caller expects one.
    wait_event = bool(expected_event)
    if operation == "attach":
        res = virsh.attach_device(vm_name, plug_dimm.xml, wait_for_event=wait_event,
                                  event_type=expected_event, debug=True, **kwargs)
    elif operation == "detach":
        res = virsh.detach_device(vm_name, plug_dimm.xml, wait_for_event=wait_event,
                                  event_type=expected_event, debug=True, **kwargs)
    else:
        # Originally `res` was left unbound here, raising NameError below.
        raise ValueError(
            "Unsupported operation '%s', expected 'attach' or 'detach'" % operation)

    if expected_error:
        libvirt.check_result(res, expected_fails=expected_error)
    else:
        libvirt.check_exit_status(res)


def check_dominfo(vm, test, expected_max, expected_used):
    """
    Check Max memory value and Used memory in virsh dominfo result.

    :param vm: vm object.
    :param test: test object.
    :param expected_max: expected Max memory in virsh dominfo.
    :param expected_used: expected Used memory in virsh dominfo.
    """
    result = virsh.dominfo(vm.name, **virsh_dargs).stdout_text.strip()

    # Parse "Max memory:    4194304" style lines into {"Max memory": "4194304"}.
    dominfo_dict = libvirt_misc.convert_to_dict(
        result, pattern=r'(\S+ \S+):\s+(\S+)')
    if dominfo_dict["Max memory"] != expected_max:
        test.fail("Memory value should be %s " % expected_max)
    if dominfo_dict["Used memory"] != expected_used:
        test.fail("Current memory should be %s " % expected_used)
    test.log.debug("Check virsh dominfo successfully.")