From c85207f8e680081275f8ddf8b3ca9e854e2ecaa9 Mon Sep 17 00:00:00 2001
From: Mateusz Junkier
Date: Wed, 15 May 2024 14:42:56 +0200
Subject: [PATCH 1/2] twister: call pre/post scripts from yaml file

Add support for executing external scripts at precise moments. These
scripts can be deployed in three distinct phases: pre-script,
post-flash-script and post-script. This functionality can help
configure the environment before testing.

Signed-off-by: Mateusz Junkier
---
 doc/develop/test/twister.rst                  | 50 +++++++
 .../pylib/twister/twisterlib/environment.py   | 14 ++
 scripts/pylib/twister/twisterlib/scripting.py | 141 ++++++++++++++++++
 scripts/pylib/twister/twisterlib/testplan.py  | 87 +++++++++++
 scripts/schemas/twister/scripting-schema.yaml | 67 +++++++++
 scripts/tests/twister/test_testplan.py        |  1 +
 6 files changed, 360 insertions(+)
 create mode 100644 scripts/pylib/twister/twisterlib/scripting.py
 create mode 100644 scripts/schemas/twister/scripting-schema.yaml

diff --git a/doc/develop/test/twister.rst b/doc/develop/test/twister.rst
index e45d87188448..b5ec44c496e3 100644
--- a/doc/develop/test/twister.rst
+++ b/doc/develop/test/twister.rst
@@ -1360,6 +1360,56 @@ using an external J-Link probe. The ``probe_id`` keyword overrides the
    runner: jlink
    serial: null
 
+Additional Scripts
+++++++++++++++++++
+
+Twister can automate the execution of external scripts at precise
+moments. These scripts can be deployed in three distinct phases:
+pre-script, post-flash-script and post-script. This functionality can
+help configure the environment before testing.
+
+To use the scripting capability, append the argument
+``--scripting-list <path to YAML file>`` to the twister call. The
+``override_script`` parameter explicitly confirms the intent to execute
+the specified script. When set to true, this flag allows overriding the
+``--pre-script``, ``--post-flash-script`` and ``--post-script`` commands
+specified via other sources.
+
+The scripting YAML file consists of a series of dictionaries, each
+containing the keys ``scenarios``, ``platforms``, ``pre_script``,
+``post_flash_script`` and ``post_script``. Each script is defined by
+``path``, the path to the script; ``timeout``, an optional integer
+specifying the maximum duration allowed for the script execution; and
+``override_script``. These keys define the specific combinations of
+scenarios and platforms, as well as the corresponding scripts to be
+executed at each stage. Additionally, a mandatory ``comment`` entry
+describes the scripts and their purpose.
+
+An example of entries in a scripting list yaml:
+
+.. code-block:: yaml
+
+   - scenarios:
+       - sample.basic.helloworld
+     platforms:
+       - frdm_k64f
+     pre_script:
+       path: pre_script.sh
+       timeout: 30
+       override_script: false
+     post_flash_script:
+       path: post_flash_script.sh
+       timeout: 30
+       override_script: false
+     post_script:
+       path: post_script.sh
+       timeout: 30
+       override_script: false
+     comment: Testing extra scripts
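+
+For instance, a minimal device-testing invocation that applies such a
+list could look as follows (``map.yaml`` and ``scripting.yaml`` are
+placeholder names for the hardware map and the scripting list file):
+
+.. code-block:: bash
+
+   scripts/twister --device-testing --hardware-map map.yaml \
+     --platform frdm_k64f -T samples/hello_world \
+     --scripting-list scripting.yaml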
+
 Using Single Board For Multiple Variants
 ++++++++++++++++++++++++++++++++++++++++
 
diff --git a/scripts/pylib/twister/twisterlib/environment.py b/scripts/pylib/twister/twisterlib/environment.py
index e64fb6934460..738e35a3b3dc 100644
--- a/scripts/pylib/twister/twisterlib/environment.py
+++ b/scripts/pylib/twister/twisterlib/environment.py
@@ -686,6 +686,16 @@ def add_parse_arguments(parser = None) -> argparse.ArgumentParser:
         help="""quit twister once there is build / run failure
         """)
 
+    parser.add_argument(
+        "--scripting-list",
+        action="append",
+        metavar="YAML_FILE",
+        help="YAML configuration file with device handler hooks to run additional "
+        "pre-/post-flash phase scripts for selected platform and test scenario "
+        "combinations. The file must comply with `scripting-schema.yaml`. "
+        "Overrides `--pre-script` and `--hardware-map` settings. "
+        "Requires `--device-testing`.")
+
     parser.add_argument(
         "--report-name",
         help="""Create a report with a custom name.
@@ -922,6 +932,10 @@ def parse_arguments(
         )
         sys.exit(1)
 
+    if options.scripting_list and not options.device_testing:
+        logger.error("--scripting-list requires --device-testing")
+        sys.exit(1)
+
     if (
         options.device_testing
         and (options.device_serial or options.device_serial_pty)
         and len(options.platform) != 1
diff --git a/scripts/pylib/twister/twisterlib/scripting.py b/scripts/pylib/twister/twisterlib/scripting.py
new file mode 100644
index 000000000000..904d84c03a7c
--- /dev/null
+++ b/scripts/pylib/twister/twisterlib/scripting.py
@@ -0,0 +1,141 @@
+# Copyright (c) 2024 Intel Corporation.
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import annotations
+
+import logging
+import sys
+from dataclasses import dataclass, field
+from pathlib import Path
+
+import scl
+
+logger = logging.getLogger('twister')
+
+
+# Handles test scripting configurations.
+class Scripting:
+    def __init__(self, scripting_files: list[Path | str], scripting_schema: dict) -> None:
+        self.scripting = ScriptingData()
+        self.scripting_files = scripting_files or []
+        self.scripting_schema = scripting_schema
+        self.load_and_validate_files()
+
+    # Finds and returns the scripting element that matches the given test name and platform.
+    def get_matched_scripting(self, testname: str, platform: str) -> ScriptingElement | None:
+        matched_scripting = self.scripting.find_matching_scripting(testname, platform)
+        if matched_scripting:
+            logger.debug(
+                f"'{testname}' on '{platform}' device handler scripts '{str(matched_scripting)}'"
+            )
+            return matched_scripting
+        return None
+
+    def load_and_validate_files(self):
+        for scripting_file in self.scripting_files:
+            self.scripting.extend(
+                ScriptingData.load_from_yaml(scripting_file, self.scripting_schema)
+            )
+
+
+@dataclass
+class Script:
+    path: str | None = None
+    timeout: int | None = None
+    override_script: bool = False
+
+
+# Represents a single scripting element with associated scripts and metadata.
+@dataclass
+class ScriptingElement:
+    scenarios: list[str] = field(default_factory=list)
+    platforms: list[str] = field(default_factory=list)
+    pre_script: Script | None = None
+    post_flash_script: Script | None = None
+    post_script: Script | None = None
+    comment: str = 'NA'
+
+    # Ensures at least one script is present and normalizes the element.
+    def __post_init__(self):
+        if not any([self.pre_script, self.post_flash_script, self.post_script]):
+            logger.error("At least one of the scripts must be specified")
+            sys.exit(1)
+        self.pre_script = self._convert_to_script(self.pre_script)
+        self.post_flash_script = self._convert_to_script(self.post_flash_script)
+        self.post_script = self._convert_to_script(self.post_script)
+
+    # Converts a dictionary to a Script instance if necessary.
+    def _convert_to_script(self, script: dict | Script | None) -> Script | None:
+        if isinstance(script, dict):
+            return Script(**script)
+        return script
+
+
+# Holds a collection of scripting elements.
+@dataclass
+class ScriptingData:
+    elements: list[ScriptingElement] = field(default_factory=list)
+
+    # Ensures all elements are ScriptingElement instances.
+    def __post_init__(self):
+        self.elements = [
+            elem if isinstance(elem, ScriptingElement) else ScriptingElement(**elem)
+            for elem in self.elements
+        ]
+
+    # Loads scripting data from a YAML file.
+    @classmethod
+    def load_from_yaml(cls, filename: Path | str, schema: dict) -> ScriptingData:
+        try:
+            raw_data = scl.yaml_load_verify(filename, schema) or []
+            return cls(raw_data)
+        except scl.EmptyYamlFileException:
+            logger.error(f'Scripting file {filename} is empty')
+            sys.exit(1)
+        except FileNotFoundError:
+            logger.error(f'Scripting file {filename} not found')
+            sys.exit(1)
+        except Exception as e:
+            logger.error(f'Error loading {filename}: {e}')
+            sys.exit(1)
+
+    # Extends the current scripting data with another set of scripting data.
+    def extend(self, other: ScriptingData) -> None:
+        self.elements.extend(other.elements)
+
+    # Finds a scripting element that matches the given scenario and platform.
+    def find_matching_scripting(self, scenario: str, platform: str) -> ScriptingElement | None:
+        matched_elements = []
+
+        for element in self.elements:
+            if element.scenarios and not _matches_element(scenario, element.scenarios):
+                continue
+            if element.platforms and not _matches_element(platform, element.platforms):
+                continue
+            matched_elements.append(element)
+
+        # An element that sets override_script on any of its scripts takes
+        # precedence; more than one such match is ambiguous and aborts.
+        override_scripts = [
+            elem
+            for elem in matched_elements
+            if (
+                (elem.pre_script and elem.pre_script.override_script)
+                or (elem.post_flash_script and elem.post_flash_script.override_script)
+                or (elem.post_script and elem.post_script.override_script)
+            )
+        ]
+
+        if len(override_scripts) > 1:
+            logger.error("Multiple override definitions for scripts found")
+            sys.exit(1)
+        elif len(override_scripts) == 1:
+            return override_scripts[0]
+        elif matched_elements:
+            return matched_elements[0]
+
+        return None
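+
+
+# Illustrative sketch of the precedence rules above (assumed data, not used
+# by the module): when two elements match the same scenario and platform, the
+# element whose script sets override_script=True wins; if none sets it, the
+# first match in file order is returned:
+#
+#   data = ScriptingData(elements=[
+#       {'scenarios': ['sample.basic'], 'platforms': ['frdm_k64f'],
+#        'pre_script': {'path': 'a.sh'}, 'comment': 'first'},
+#       {'scenarios': ['sample.basic.helloworld'], 'platforms': ['frdm_k64f'],
+#        'pre_script': {'path': 'b.sh', 'override_script': True},
+#        'comment': 'second'},
+#   ])
+#   data.find_matching_scripting('sample.basic.helloworld', 'frdm_k64f').comment
+#   # -> 'second'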
+
+
+# Checks if any of the provided patterns occurs as a substring of the element.
+def _matches_element(element: str, patterns: list[str]) -> bool:
+    return any(pattern in element for pattern in patterns)
diff --git a/scripts/pylib/twister/twisterlib/testplan.py b/scripts/pylib/twister/twisterlib/testplan.py
index 5d17f5cc5466..dcbbcdd930b9 100755
--- a/scripts/pylib/twister/twisterlib/testplan.py
+++ b/scripts/pylib/twister/twisterlib/testplan.py
@@ -34,6 +34,7 @@
 from twisterlib.error import TwisterRuntimeError
 from twisterlib.platform import Platform
 from twisterlib.quarantine import Quarantine
+from twisterlib.scripting import Scripting
 from twisterlib.statuses import TwisterStatus
 from twisterlib.testinstance import TestInstance
 from twisterlib.testsuite import TestSuite, scan_testsuite_path
@@ -94,6 +95,10 @@ class TestPlan:
                       os.path.join(ZEPHYR_BASE,
                                    "scripts", "schemas", "twister", "quarantine-schema.yaml"))
 
+    scripting_schema = scl.yaml_load(
+        os.path.join(ZEPHYR_BASE,
+                     "scripts", "schemas", "twister", "scripting-schema.yaml"))
+
     tc_schema_path = os.path.join(
         ZEPHYR_BASE,
         "scripts",
@@ -113,6 +118,7 @@ def __init__(self, env: Namespace):
         # Keep track of which test cases we've filtered out and why
         self.testsuites = {}
         self.quarantine = None
+        self.scripting = None
         self.platforms = []
         self.platform_names = []
         self.selected_platforms = []
@@ -225,6 +231,11 @@ def discover(self):
                     logger.debug(f'Quarantine file {quarantine_file} is empty')
             self.quarantine = Quarantine(ql)
 
+        # handle extra scripts
+        sl = self.options.scripting_list
+        if sl:
+            self.scripting = Scripting(sl, self.scripting_schema)
+
     def load(self):
 
         if self.options.report_suffix:
@@ -265,6 +276,21 @@ def load(self):
         else:
             self.apply_filters()
 
+        if self.scripting:
+            # Check whether at least one provided script met the conditions
+            # and summarize the result for all calls in the log.
+            was_script_matched = False
+            for instance in self.instances.values():
+                was_script_matched = (
+                    was_script_matched
+                    or self.handle_additional_scripts(instance.platform.name, instance)
+                )
+
+            if not was_script_matched:
+                logger.info(
+                    "Scripting list was provided, but none of the specified conditions were met"
+                )
+
         if self.options.subset:
             s = self.options.subset
             try:
@@ -1307,6 +1333,67 @@ def _create_build_dir_link(self, links_dir_path, instance):
 
         self.link_dir_counter += 1
 
+    def handle_additional_scripts(
+        self, platform_name: str, instance: TestInstance
+    ) -> bool:
+        logger.debug(instance.testsuite.id)
+        matched_scripting = self.scripting.get_matched_scripting(
+            instance.testsuite.id, platform_name
+        )
+        if matched_scripting:
+            # Validate that the platform is supported
+            # by the matched scripting entry
+            def validate_boards(platform_scope, platform_from_yaml):
+                return any(board in platform_scope for board in platform_from_yaml)
+
+            # Map each script type to the DUT attribute that stores its timeout
+            script_types = {
+                "pre_script": "pre_script_timeout",
+                "post_flash_script": "post_flash_timeout",
+                "post_script": "post_script_timeout",
+            }
+
+            # Iterate over all DUTs and set the appropriate scripts
+            # on those that match the platform and are supported
+            for dut in self.env.hwm.duts:
+                # Check if the platform matches and if the platform
+                # is supported by the matched scripting
+                if dut.platform in platform_name and validate_boards(
+                    platform_name, matched_scripting.platforms
+                ):
+                    for script_type, script_timeout in script_types.items():
+                        # Get the script object from matched_scripting
+                        script_obj = getattr(matched_scripting, script_type, None)
+                        # If a script object is provided, check if the script path is a valid file
+                        if script_obj and script_obj.path:
+                            # Skip the script unless it explicitly allows
+                            # overriding scripts set via other sources
+                            if not script_obj.override_script:
+                                logger.info(
+                                    f"{script_type} will not be overridden on {platform_name}."
+                                )
+                                continue
+                            # Check if the script path is a valid file and set it on the DUT
+                            if Path(script_obj.path).is_file():
+                                setattr(dut, script_type, script_obj.path)
+                                # Set the script timeout on the DUT if one is provided
+                                if script_obj.timeout is not None:
+                                    setattr(dut, script_timeout, script_obj.timeout)
+                                    logger.info(
+                                        f"{script_type} {script_obj.path} will be executed on "
+                                        f"{platform_name} with timeout {script_obj.timeout}"
+                                    )
+                                else:
+                                    logger.info(
+                                        f"{script_type} {script_obj.path} will be executed on "
+                                        f"{platform_name} with no timeout specified"
+                                    )
+                            else:
+                                raise TwisterRuntimeError(
+                                    f"{script_type} script not found under path: {script_obj.path}"
+                                )
+            return True
+        return False
+
 
 def change_skip_to_error_if_integration(options, instance):
     ''' All skips on integration_platforms are treated as errors.'''
diff --git a/scripts/schemas/twister/scripting-schema.yaml b/scripts/schemas/twister/scripting-schema.yaml
new file mode 100644
index 000000000000..576ac1d93dcf
--- /dev/null
+++ b/scripts/schemas/twister/scripting-schema.yaml
@@ -0,0 +1,67 @@
+type: seq
+matching: all
+sequence:
+  - type: map
+    required: true
+    matching: all
+    mapping:
+      "scenarios":
+        type: seq
+        required: true
+        sequence:
+          - type: str
+          - unique: true
+      "platforms":
+        required: true
+        type: seq
+        sequence:
+          - type: str
+          - unique: true
+      "pre_script":
+        type: map
+        required: false
+        mapping:
+          "path":
+            type: str
+            required: true
+          "timeout":
+            type: int
+            default: 30
+            required: false
+          "override_script":
+            type: bool
+            default: false
+            required: false
+      "post_flash_script":
+        type: map
+        required: false
+        mapping:
+          "path":
+            type: str
+            required: true
+          "timeout":
+            type: int
+            default: 30
+            required: false
+          "override_script":
+            type: bool
+            default: false
+            required: false
+      "post_script":
+        type: map
+        required: false
+        mapping:
+          "path":
+            type: str
+            required: true
+          "timeout":
+            type: int
+            default: 30
+            required: false
+          "override_script":
+            type: bool
+            default: false
+            required: false
+      "comment":
+        type: str
+        required: true
diff --git a/scripts/tests/twister/test_testplan.py b/scripts/tests/twister/test_testplan.py
index 2a006043870e..91db89f1190c 100644
--- a/scripts/tests/twister/test_testplan.py
+++ b/scripts/tests/twister/test_testplan.py
@@ -566,6 +566,7 @@ def test_testplan_discover(
         test='ts1',
         quarantine_list=[tmp_path / qf for qf in ql],
         quarantine_verify=qv,
+        scripting_list=[],
     )
     testplan.testsuites = {
         'ts1': mock.Mock(id=1),

From 826aa698b3c471a09a318f2ab6c0b78e68c41f94 Mon Sep 17 00:00:00 2001
From: Mateusz Junkier
Date: Wed, 15 May 2024 14:45:44 +0200
Subject: [PATCH 2/2] twister: add tests for additional scripts feature

Add unit tests for the additional scripting feature.
Signed-off-by: Mateusz Junkier
---
 scripts/tests/twister/test_scripting.py | 388 ++++++++++++++++++++++++
 1 file changed, 388 insertions(+)
 create mode 100644 scripts/tests/twister/test_scripting.py

diff --git a/scripts/tests/twister/test_scripting.py b/scripts/tests/twister/test_scripting.py
new file mode 100644
index 000000000000..eed97b6999b3
--- /dev/null
+++ b/scripts/tests/twister/test_scripting.py
@@ -0,0 +1,388 @@
+#!/usr/bin/env python3
+# Copyright (c) 2024 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+from unittest.mock import mock_open, patch
+
+import pytest
+from twisterlib.scripting import (
+    Script,
+    Scripting,
+    ScriptingData,
+    ScriptingElement,
+    _matches_element,
+)
+
+
+# Group related tests into a class for ScriptingElement
+class TestScriptingElement:
+    def test_initialization_with_properties(self):
+        # Test initialization with all properties set
+        element = ScriptingElement(
+            scenarios=['scenario1', 'scenario2'],
+            platforms=['platform1', 'platform2'],
+            pre_script=Script(path='pre_script.sh', timeout=10, override_script=True),
+            post_flash_script=Script(
+                path='post_flash_script.sh', timeout=20, override_script=False
+            ),
+            post_script=Script(path='post_script.sh'),  # No timeout or override_script specified
+            comment='Test comment',
+        )
+        # Check if the properties are set correctly
+        assert element.scenarios == ['scenario1', 'scenario2']
+        assert element.platforms == ['platform1', 'platform2']
+        assert element.pre_script.path == 'pre_script.sh'  # Compare the path attribute
+        assert (
+            element.post_flash_script.path == 'post_flash_script.sh'
+        )  # Compare the path attribute
+        assert element.post_script.path == 'post_script.sh'  # Compare the path attribute
+        assert element.comment == 'Test comment'
+        assert element.pre_script.timeout == 10
+        assert element.pre_script.override_script is True
+        assert element.post_flash_script.timeout == 20
+        assert element.post_flash_script.override_script is False
+        assert element.post_script.timeout is None
+        assert element.post_script.override_script is False
+
+    def test_initialization_with_no_properties(self):
+        # Test initialization with no properties set, which should trigger an error
+        with pytest.raises(SystemExit) as excinfo:
+            ScriptingElement()
+        # Check if the correct exit code is set in the SystemExit exception
+        assert excinfo.value.code == 1
+
+    def test_initialization_with_empty_properties(self):
+        # Test initialization with empty properties, which should trigger an error
+        with pytest.raises(SystemExit) as excinfo:
+            ScriptingElement(
+                scenarios=[],
+                platforms=[],
+                pre_script=None,
+                post_flash_script=None,
+                post_script=None,
+                comment='',
+            )
+        # Check if the correct exit code is set in the SystemExit exception
+        assert excinfo.value.code == 1
+
+
+# Group related tests into a class for ScriptingData
+class TestScriptingData:
+    @pytest.fixture
+    def mock_scripting_elements(self):
+        return [
+            ScriptingElement(
+                scenarios=['scenario1'],
+                platforms=['platform1'],
+                pre_script=Script(path='pre_script1.sh'),
+                post_flash_script=Script(path='post_flash_script1.sh'),
+                post_script=Script(path='post_script1.sh'),
+                comment='Test comment 1',
+            ),
+            ScriptingElement(
+                scenarios=['scenario2'],
+                platforms=['platform2'],
+                pre_script=Script(path='pre_script2.sh'),
+                post_flash_script=Script(path='post_flash_script2.sh'),
+                post_script=Script(path='post_script2.sh'),
+                comment='Test comment 2',
+            ),
+        ]
+
+    def test_initialization_with_scripting_elements(self, mock_scripting_elements):
+        # Initialize ScriptingData with the list of mock ScriptingElement instances
+        scripting_data = ScriptingData(elements=mock_scripting_elements)
+        # Check if the elements are stored correctly
+        assert len(scripting_data.elements) == 2
+        assert all(isinstance(elem, ScriptingElement) for elem in scripting_data.elements)
+
+    def test_load_from_yaml(self):
+        # Mock YAML content
+        yaml_content = '''
+        - scenarios: ['scenario1']
+          platforms: ['platform1']
+          pre_script:
+            path: 'pre_script1.sh'
+          post_flash_script:
+            path: 'post_flash_script1.sh'
+          post_script:
+            path: 'post_script1.sh'
+          comment: 'Test comment 1'
+        - scenarios: ['scenario2']
+          platforms: ['platform2']
+          pre_script:
+            path: 'pre_script2.sh'
+          post_flash_script:
+            path: 'post_flash_script2.sh'
+          post_script:
+            path: 'post_script2.sh'
+          comment: 'Test comment 2'
+        '''
+        # Define a dummy schema; it is never evaluated here because
+        # scl.yaml_load_verify is mocked below
+        mock_schema = {
+            'type': 'seq',
+            'matching': 'all',
+            'sequence': [
+                {
+                    'type': 'map',
+                    'required': True,
+                    'matching': 'all',
+                    'mapping': {
+                        'scenarios': {
+                            'type': 'seq',
+                            'required': True,
+                            'sequence': [{'type': 'str'}, {'unique': True}],
+                        },
+                        'platforms': {
+                            'required': False,
+                            'type': 'seq',
+                            'sequence': [{'type': 'str'}, {'unique': True}],
+                        },
+                        'pre_script': {'type': 'str', 'required': False},
+                        'post_flash_script': {'type': 'str', 'required': False},
+                        'post_script': {'type': 'str', 'required': False},
+                        'comment': {'type': 'str', 'required': True},
+                    },
+                }
+            ],
+        }
+        # Use mock_open to simulate file reading
+        with (
+            patch('builtins.open', mock_open(read_data=yaml_content)),
+            patch('scl.yaml_load_verify') as mock_yaml_load_verify,
+        ):
+            # Mock the yaml_load_verify function to return a list of dictionaries
+            mock_yaml_load_verify.return_value = [
+                {
+                    'scenarios': ['scenario1'],
+                    'platforms': ['platform1'],
+                    'pre_script': {'path': 'pre_script1.sh'},
+                    'post_flash_script': {'path': 'post_flash_script1.sh'},
+                    'post_script': {'path': 'post_script1.sh'},
+                    'comment': 'Test comment 1',
+                },
+                {
+                    'scenarios': ['scenario2'],
+                    'platforms': ['platform2'],
+                    'pre_script': {'path': 'pre_script2.sh'},
+                    'post_flash_script': {'path': 'post_flash_script2.sh'},
+                    'post_script': {'path': 'post_script2.sh'},
+                    'comment': 'Test comment 2',
+                },
+            ]
+            # Load ScriptingData from a YAML file with the mock schema
+            scripting_data = ScriptingData.load_from_yaml('dummy_file.yaml', mock_schema)
+            # Check if the data was loaded correctly
+            assert len(scripting_data.elements) == 2
+            assert all(isinstance(elem, ScriptingElement) for elem in scripting_data.elements)
+
+
+class TestMatchingFunctionality:
+    @pytest.fixture
+    def scripting_data_with_elements(self):
+        return ScriptingData(
+            elements=[
+                ScriptingElement(
+                    scenarios=['test_scenario1'],
+                    platforms=['platform1'],
+                    pre_script=Script(path='pre_script1.sh'),
+                    comment='Match 1',
+                ),
+                ScriptingElement(
+                    scenarios=['test_scenario2'],
+                    platforms=['platform2'],
+                    pre_script=Script(path='pre_script2.sh'),
+                    comment='Match 2',
+                ),
+                ScriptingElement(
+                    scenarios=[''],
+                    platforms=['platform3'],
+                    pre_script=Script(path='pre_script3.sh'),
+                    comment='Wildcard scenario',
+                ),
+                ScriptingElement(
+                    scenarios=['test_scenario3'],
+                    platforms=[''],
+                    pre_script=Script(path='pre_script4.sh'),
+                    comment='Wildcard platform',
+                ),
+            ]
+        )
+
+    def test_find_matching_scripting_exact_match(self, scripting_data_with_elements):
+        # Test finding a matching scripting element for a given
+        # scenario and platform combination that should match exactly
+        matched_element = scripting_data_with_elements.find_matching_scripting(
+            'test_scenario1', 'platform1'
+        )
+        assert matched_element is not None, "Expected a matching scripting element but found None."
+        assert (
+            matched_element.comment == 'Match 1'
+        ), f"Expected comment 'Match 1', but got '{matched_element.comment}'."
+
+    def test_find_matching_scripting_no_match(self, scripting_data_with_elements):
+        # Test that no scripting element matches a scenario
+        # and platform combination that is not covered
+        matched_element = scripting_data_with_elements.find_matching_scripting(
+            'nonexistent_test', 'platform1'
+        )
+        assert matched_element is None
+
+    def test_find_matching_scripting_wildcard_scenario(self, scripting_data_with_elements):
+        # Test finding a matching scripting element with a wildcard scenario
+        matched_element = scripting_data_with_elements.find_matching_scripting(
+            'any_scenario', 'platform3'
+        )
+        assert matched_element is not None
+        assert matched_element.comment == 'Wildcard scenario'
+
+    def test_find_matching_scripting_wildcard_platform(self, scripting_data_with_elements):
+        # Test finding a matching scripting element with a wildcard platform
+        matched_element = scripting_data_with_elements.find_matching_scripting(
+            'test_scenario3', 'any_platform'
+        )
+        assert matched_element is not None
+        assert matched_element.comment == 'Wildcard platform'
+
+
+# Test function for the _matches_element helper function
+@pytest.mark.parametrize(
+    "element,patterns,expected",
+    [
+        ("test_scenario1", ["test_scenario1"], True),
+        ("test_scenario1", ["test_scenario2"], False),
+        ("test_scenario1", ["scenario1"], True),
+        ("test_scenario1", ["test_"], True),
+        ("test_scenario1", ["scenario1$"], False),
+        ("test_scenario1", ["^scenario"], False),
+        ("test_scenario1", ["test_scenario", "scenario1"], True),
+        ("test_scenario1", [], False),  # No patterns to match
+    ],
+)
+def test_matches_element(element, patterns, expected):
+    # _matches_element performs plain substring matching, so regex anchors
+    # such as '^' and '$' are treated literally and do not match
+    assert _matches_element(element, patterns) == expected
+
+
+# Fixture providing sample scripting data
+@pytest.fixture
+def mock_scripting_data():
+    elements = [
+        ScriptingElement(
+            scenarios=['test_scenario1'],
+            platforms=['platform1'],
+            pre_script=Script(path='pre_script1.sh'),
+            comment='Match 1',
+        ),
+        ScriptingElement(
+            scenarios=['test_scenario2'],
+            platforms=['platform2'],
+            pre_script=Script(path='pre_script2.sh'),
+            comment='Match 2',
+        ),
+    ]
+    return ScriptingData(elements=elements)
+
+
+# Define the mock_load_from_yaml fixture
+@pytest.fixture
+def mock_load_from_yaml():
+    with patch('twisterlib.scripting.ScriptingData.load_from_yaml') as mock_method:
+        # Set the return_value of the mocked load_from_yaml method
+        mock_method.return_value = ScriptingData(
+            [
+                ScriptingElement(
+                    scenarios=['scenario1'],
+                    platforms=['platform1'],
+                    comment='Test comment 1',
+                    pre_script='mock_pre_script_1.sh',  # Include a mock pre_script attribute
+                ),
+                ScriptingElement(
+                    scenarios=['scenario2'],
+                    platforms=['platform2'],
+                    comment='Test comment 2',
+                    pre_script='mock_pre_script_2.sh',  # Include a mock pre_script attribute
+                ),
+            ]
+        )
+        yield mock_method
+
+
+def test_scripting_initialization(mock_load_from_yaml):
+    # Initialize Scripting with a list of dummy file paths
+    scripting = Scripting(scripting_files=['dummy_path1.yaml'], scripting_schema={})
+    # Check if the scripting data was loaded correctly; the
+    # mock_load_from_yaml fixture replaces the real load_from_yaml method
+    assert len(scripting.scripting.elements) == 2
+
+
+# Test function for getting matched scripting elements
+def test_get_matched_scripting(mock_scripting_data):
+    # Initialize Scripting without any scripting files
+    scripting = Scripting(scripting_files=[], scripting_schema={})
+    # Manually set the scripting data to the mock_scripting_data
+    scripting.scripting = mock_scripting_data
+
+    # Test get_matched_scripting with a test name and platform that should find a match
+    matched_element = scripting.get_matched_scripting('test_scenario1', 'platform1')
+    assert matched_element is not None
+    assert matched_element.comment == 'Match 1'
+
+    # Test get_matched_scripting with a test name and platform that should not find a match
+    matched_element = scripting.get_matched_scripting('nonexistent_test', 'platform1')
+    assert matched_element is None
+
+
+@pytest.fixture
+def scripting_data_instances():
+    scripting_data1 = ScriptingData(
+        elements=[
+            ScriptingElement(
+                scenarios=['scenario1'],
+                platforms=['platform1'],
+                pre_script=Script(path='pre_script1.sh'),
+                comment='Data1 Match 1',
+            ),
+            ScriptingElement(
+                scenarios=['scenario2'],
+                platforms=['platform2'],
+                pre_script=Script(path='pre_script2.sh'),
+                comment='Data1 Match 2',
+            ),
+        ]
+    )
+    scripting_data2 = ScriptingData(
+        elements=[
+            ScriptingElement(
+                scenarios=['scenario3'],
+                platforms=['platform3'],
+                pre_script=Script(path='pre_script3.sh'),
+                comment='Data2 Match 1',
+            ),
+            ScriptingElement(
+                scenarios=['scenario4'],
+                platforms=['platform4'],
+                pre_script=Script(path='pre_script4.sh'),
+                comment='Data2 Match 2',
+            ),
+        ]
+    )
+    return scripting_data1, scripting_data2
+
+
+def test_scripting_data_extension(scripting_data_instances):
+    scripting_data1, scripting_data2 = scripting_data_instances
+    # Extend the first ScriptingData instance with the second one
+    scripting_data1.extend(scripting_data2)
+
+    # Check if the elements are combined correctly
+    assert len(scripting_data1.elements) == 4
+    assert scripting_data1.elements[0].comment == 'Data1 Match 1'
+    assert scripting_data1.elements[1].comment == 'Data1 Match 2'
+    assert scripting_data1.elements[2].comment == 'Data2 Match 1'
+    assert scripting_data1.elements[3].comment == 'Data2 Match 2'
+
+    # Check if the elements are instances of ScriptingElement
+    for element in scripting_data1.elements:
+        assert isinstance(element, ScriptingElement)
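+
+
+# Extra sanity check (an illustrative addition, not part of the original
+# suite): Script fields default to path=None, timeout=None and
+# override_script=False, which the override handling above relies on.
+def test_script_defaults():
+    script = Script()
+    assert script.path is None
+    assert script.timeout is None
+    assert script.override_script is False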