diff --git a/libvirt/tests/cfg/viommu_netperf.cfg b/libvirt/tests/cfg/viommu_netperf.cfg
new file mode 100644
index 0000000000..950667f8ea
--- /dev/null
+++ b/libvirt/tests/cfg/viommu_netperf.cfg
@@ -0,0 +1,49 @@
+- vIOMMU.netperf:
+    type = viommu_netperf
+    vms = avocado-vt-vm1
+    net_name = network_conn
+    ip_attrs = {"netmask": "255.255.255.0", "address": "192.168.144.1", "dhcp_ranges": {"attrs": {"end": "192.168.144.254", "start": "192.168.144.2"}}}
+    iface_attrs = {"source": {"network": "${net_name}"}, "type_name": "network", "model": "virtio"}
+    network_attrs = {"name": "${net_name}", "forward": {"mode": "nat"}, "ips": [${ip_attrs}]}
+    start_vm = "no"
+    variants:
+        - virtio:
+            only q35, aarch64
+            func_supported_since_libvirt_ver = (8, 3, 0)
+            iommu_dict = {'model': 'virtio'}
+        - intel:
+            only q35
+            start_vm = "yes"
+            enable_guest_iommu = "yes"
+            iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on', 'eim': 'on', 'iotlb': 'on', 'aw_bits': '48'}}
+        - smmuv3:
+            only aarch64
+            func_supported_since_libvirt_ver = (5, 5, 0)
+            iommu_dict = {'model': 'smmuv3'}
+    variants:
+        - e1000e:
+            only q35
+            iface_model = 'e1000e'
+            iface_dict = {'type_name': 'network', 'model': '${iface_model}', 'source': {'network': 'default'}}
+        - virtio_interface:
+            interface_driver_name = "vhost"
+            interface_driver = {'driver_attr': {'name': '${interface_driver_name}', 'iommu': 'on'}}
+            iface_dict = {'type_name': 'network', 'model': 'virtio', 'driver': ${interface_driver}, 'source': {'network': 'default'}}
+    variants:
+        - guest2guest:
+            vms = avocado-vt-vm1 vm2
+            netperf_client = avocado-vt-vm1
+            netperf_server = vm2
+        - host2guest:
+            netperf_client = ${local_ip}
+            netperf_server = ${main_vm}
+        - guest2host:
+            netperf_client = ${main_vm}
+            netperf_server = ${local_ip}
+            UDP_STREAM:
+                extra_cmd_opts = "-- -R 1"
+    variants:
+        - TCP_STREAM:
+            test_protocol = TCP_STREAM
+        - UDP_STREAM:
+            test_protocol = UDP_STREAM
diff --git a/libvirt/tests/cfg/virtual_network/connectivity/netperf_nat_interface.cfg b/libvirt/tests/cfg/virtual_network/connectivity/netperf_nat_interface.cfg
new file mode 100644
index 0000000000..8b5890da0d
--- /dev/null
+++ b/libvirt/tests/cfg/virtual_network/connectivity/netperf_nat_interface.cfg
@@ -0,0 +1,27 @@
+- virtual_network.netperf.nat_interface:
+    type = netperf_nat_interface
+    vms = avocado-vt-vm1
+    start_vm = no
+    net_name = network_conn
+    ip_attrs = {"netmask": "255.255.255.0", "address": "192.168.144.1", "dhcp_ranges": {"attrs": {"end": "192.168.144.254", "start": "192.168.144.2"}}}
+    iface_attrs = {"source": {"network": "${net_name}"}, "type_name": "network", "model": "virtio"}
+    network_attrs = {"name": "${net_name}", "forward": {"mode": "nat"}, "ips": [${ip_attrs}]}
+
+    variants:
+        - guest2guest:
+            vms = avocado-vt-vm1 vm2
+            netperf_client = avocado-vt-vm1
+            netperf_server = vm2
+        - host2guest:
+            netperf_client = ${local_ip}
+            netperf_server = ${main_vm}
+        - guest2host:
+            netperf_client = ${main_vm}
+            netperf_server = ${local_ip}
+            UDP_STREAM:
+                extra_cmd_opts = "-- -R 1"
+    variants:
+        - TCP_STREAM:
+            test_protocol = TCP_STREAM
+        - UDP_STREAM:
+            test_protocol = UDP_STREAM
diff --git a/libvirt/tests/src/sriov/vIOMMU/viommu_netperf.py b/libvirt/tests/src/sriov/vIOMMU/viommu_netperf.py
new file mode 100644
index 0000000000..b8c20a2893
--- /dev/null
+++ b/libvirt/tests/src/sriov/vIOMMU/viommu_netperf.py
@@ -0,0 +1,39 @@
+from virttest.libvirt_xml import vm_xml
+from virttest.utils_libvirt import libvirt_vmxml
+
+from provider.viommu import viommu_base
+from provider.virtual_network import network_base
+
+
+def run(test, params, env):
+    """
+    Run netperf testing between the host (or another VM) and a VM with an IOMMU device
+    """
+    cleanup_ifaces = "yes" == params.get("cleanup_ifaces", "yes")
+    iommu_dict = eval(params.get('iommu_dict', '{}'))
+
+    vms = params.get('vms').split()
+    vm_objs = [env.get_vm(vm_i) for vm_i in vms]
+
+    test_objs = [viommu_base.VIOMMUTest(vm, test, params) for vm in vm_objs]
+
+    try:
+        test.log.info("TEST_SETUP: Update VM XML.")
+        for test_obj in test_objs:
+            test_obj.setup_iommu_test(iommu_dict=iommu_dict,
+                                      cleanup_ifaces=cleanup_ifaces)
+
+        iface_dict = test_objs[0].parse_iface_dict()
+        if cleanup_ifaces:
+            vmxml_lists = list(map(vm_xml.VMXML.new_from_inactive_dumpxml, vms))
+            [libvirt_vmxml.modify_vm_device(vmxml_i, 'interface', iface_dict) for vmxml_i in vmxml_lists]
+
+        test.log.info('TEST_STEP: Start the VM(s)')
+        [vm_inst.start() for vm_inst in vm_objs]
+        [vm_inst.wait_for_login() for vm_inst in vm_objs]
+
+        test.log.info("TEST_STEP: Run netperf testing between host (or VM) and VM.")
+        network_base.exec_netperf_test(params, env)
+    finally:
+        for test_obj in test_objs:
+            test_obj.teardown_iommu_test()
diff --git a/libvirt/tests/src/virtual_network/connectivity/netperf_nat_interface.py b/libvirt/tests/src/virtual_network/connectivity/netperf_nat_interface.py
new file mode 100644
index 0000000000..1e350dc901
--- /dev/null
+++ b/libvirt/tests/src/virtual_network/connectivity/netperf_nat_interface.py
@@ -0,0 +1,36 @@
+from provider.virtual_network import network_base
+
+from virttest import virsh
+
+from virttest.libvirt_xml import vm_xml
+from virttest.utils_libvirt import libvirt_network
+from virttest.utils_libvirt import libvirt_vmxml
+
+
+def run(test, params, env):
+    """
+    Verify that the guest works well under netperf stress testing
+    """
+    vms = params.get('vms').split()
+    vm_objs = [env.get_vm(vm_i) for vm_i in vms]
+    network_attrs = eval(params.get('network_attrs'))
+    iface_attrs = eval(params.get('iface_attrs'))
+
+    bkxmls = list(map(vm_xml.VMXML.new_from_inactive_dumpxml, vms))
+
+    try:
+        libvirt_network.create_or_del_network(network_attrs)
+        test.log.debug(f'Network xml:\n'
+                       f'{virsh.net_dumpxml(network_attrs["name"]).stdout_text}')
+        vmxml_lists = list(map(vm_xml.VMXML.new_from_inactive_dumpxml, vms))
+        [libvirt_vmxml.modify_vm_device(vmxml_i, 'interface', iface_attrs)
+         for vmxml_i in vmxml_lists]
+
+        test.log.info('TEST_STEP: Start the VM(s)')
+        [vm_inst.start() for vm_inst in vm_objs]
+        [vm_inst.wait_for_login() for vm_inst in vm_objs]
+        network_base.exec_netperf_test(params, env)
+
+    finally:
+        [backup_xml.sync() for backup_xml in bkxmls]
+        libvirt_network.create_or_del_network(network_attrs, is_del=True)
diff --git a/provider/virtual_network/network_base.py b/provider/virtual_network/network_base.py
index d2e380a946..f3ff741096 100644
--- a/provider/virtual_network/network_base.py
+++ b/provider/virtual_network/network_base.py
@@ -9,6 +9,7 @@
 from virttest import remote
 from virttest import utils_misc
 from virttest import utils_net
+from virttest import utils_package
 from virttest import virsh
 from virttest.libvirt_xml import network_xml
 from virttest.libvirt_xml import vm_xml
@@ -376,3 +377,77 @@ def check_throughput(serv_runner, cli_runner, ip_addr, bw, th_type):
         raise exceptions.TestFail(f'Actual {th_type} is not close to expected '
                                   f'{th_type}:\n{msg}')
     LOG.debug(msg)
+
+
+def exec_netperf_test(params, env):
+    """
+    Run netperf between the configured client and server (host or VM) and verify the guests stay healthy
+
+    :param params: Dictionary with the test parameters.
+    :param env: Dictionary with test environment.
+    """
+    netperf_client = params.get("netperf_client")
+    netperf_server = params.get("netperf_server")
+    extra_cmd_opts = params.get("extra_cmd_opts", "")
+    netperf_timeout = params.get("netperf_timeout", "60")
+    test_protocol = params.get("test_protocol")
+    vms = params.get('vms').split()
+    vm_objs = {vm_i: env.get_vm(vm_i) for vm_i in vms}
+    before_test_cores = process.run("coredumpctl list", ignore_status=True, verbose=True).stdout_text
+
+    def _get_access_info(netperf_address):
+        LOG.debug(f"Checking access to {netperf_address}...")
+        session = None
+        if re.match(r"((\d){1,3}\.){3}(\d){1,3}", netperf_address):
+            func = process.run
+            test_ip = netperf_address
+        else:
+            if netperf_address not in vms:
+                raise exceptions.TestError(f"Unable to get {netperf_address} from {vms}!")
+            vm = vm_objs.get(netperf_address)
+            if not vm.is_alive():
+                vm.start()
+            session = vm.wait_for_login()
+            test_ip = vm.get_address()
+            func = session.cmd
+        return func, test_ip, session
+
+    c_func, c_ip, c_session = _get_access_info(netperf_client)
+    s_func, s_ip, s_session = _get_access_info(netperf_server)
+
+    try:
+        if not utils_package.package_install("netperf", c_session):
+            raise exceptions.TestError("Unable to install netperf on the netperf client!")
+        if not utils_package.package_install("netperf", s_session):
+            raise exceptions.TestError("Unable to install netperf on the netperf server!")
+        c_func("systemctl stop firewalld")
+        s_func("systemctl stop firewalld")
+
+        LOG.debug("Start netserver...")
+        if s_ip == netperf_server:
+            s_func("killall netserver", ignore_status=True)
+        s_func("netserver")
+
+        LOG.debug("Run netperf command...")
+        test_cmd = f"netperf -H {s_ip} -l {netperf_timeout} -C -c -t {test_protocol} {extra_cmd_opts}"
+        c_func(test_cmd, timeout=int(netperf_timeout) + 60)
+
+        for vm in vm_objs.values():
+            try:
+                vm.wait_for_login().close()
+            except (remote.LoginError, aexpect.ShellError) as e:
+                LOG.error(f"Unable to access {vm.name}, the guest OS may have crashed - {e}")
+                vm.destroy()
+                vm.start()
+                vm.wait_for_login().close()
+        after_test_cores = process.run("coredumpctl list",
+                                       ignore_status=True, verbose=True).stdout_text
+        if after_test_cores != before_test_cores:
+            raise exceptions.TestFail("New coredump entries were generated during the test!")
+
+    finally:
+        LOG.info("Test teardown: Clean up the environment.")
+        s_func("killall netserver")
+        s_func("systemctl start firewalld")
+        # TODO: Start firewalld on the guest(s) as well
+        process.run("systemctl start firewalld", ignore_status=True)
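
A minimal usage sketch (not part of the patch) of the command exec_netperf_test() assembles on the client side, assuming the guest2host + UDP_STREAM variant from the cfg files above; the server IP is a placeholder, and the reading of "-- -R 1" (allow the UDP traffic to be routed rather than refused as non-local) is an assumption, not something stated in the patch:

    # Illustrative only: how the cfg parameters resolve into the netperf command line.
    s_ip = "192.168.122.1"        # assumed host IP resolved from netperf_server = ${local_ip}
    netperf_timeout = "60"        # default from params.get("netperf_timeout", "60")
    test_protocol = "UDP_STREAM"
    extra_cmd_opts = "-- -R 1"    # set by the UDP_STREAM block under guest2host in the cfg

    test_cmd = f"netperf -H {s_ip} -l {netperf_timeout} -C -c -t {test_protocol} {extra_cmd_opts}"
    print(test_cmd)
    # netperf -H 192.168.122.1 -l 60 -C -c -t UDP_STREAM -- -R 1

For TCP_STREAM runs extra_cmd_opts defaults to an empty string, so the trailing test-specific options are simply omitted.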