From 895cd0bd3070e2dec59c9b31d292ad5d39dde057 Mon Sep 17 00:00:00 2001 From: Evan Tung Date: Mon, 17 Jun 2024 20:52:10 -0700 Subject: [PATCH] fix: Remove dev_enabled and viz_enabled, format with black to avoid multiline f-strings and standardize code style, use GitHub default python gitignore --- .gitignore | 133 ++- Pipfile | 18 + Pipfile.lock | 262 +++++ conftest.py | 54 +- driver.py | 101 +- environment.yml | 1 + onair/config/default_config.ini | 2 +- onair/config/reporter_config.ini | 2 - onair/data_handling/csv_parser.py | 15 +- onair/data_handling/on_air_data_source.py | 18 +- onair/data_handling/parser_util.py | 26 +- onair/data_handling/redis_adapter.py | 100 +- onair/data_handling/tlm_json_parser.py | 56 +- .../ai_plugin_abstract/__init__.py | 2 +- .../ai_plugin_abstract/ai_plugin.py | 6 +- onair/src/ai_components/learners_interface.py | 3 +- onair/src/ai_components/planners_interface.py | 5 +- onair/src/reasoning/agent.py | 49 +- .../reasoning/complex_reasoning_interface.py | 12 +- onair/src/reasoning/diagnosis.py | 44 +- onair/src/run_scripts/execution_engine.py | 142 +-- onair/src/run_scripts/sim.py | 21 +- onair/src/systems/status.py | 9 +- onair/src/systems/telemetry_test_suite.py | 138 +-- onair/src/systems/vehicle_rep.py | 20 +- onair/src/util/cleanup.py | 1 + onair/src/util/data_conversion.py | 8 +- onair/src/util/file_io.py | 24 +- onair/src/util/plugin_import.py | 10 +- onair/src/util/print_io.py | 138 ++- onair/src/util/sim_io.py | 59 +- plugins/generic/generic_plugin.py | 3 +- plugins/kalman/kalman_plugin.py | 69 +- plugins/reporter/reporter_plugin.py | 5 +- redis-experiment-publisher.py | 31 +- test/onair/data_handling/test_csv_parser.py | 123 ++- .../data_handling/test_on_air_data_source.py | 57 +- test/onair/data_handling/test_parser_util.py | 493 ++++++--- .../onair/data_handling/test_redis_adapter.py | 580 ++++++---- .../data_handling/test_tlm_json_parser.py | 353 ++++--- .../ai_plugin_abstract/test_AI_plugin_core.py | 25 +- .../ai_components/test_learners_interface.py | 61 +- .../ai_components/test_planners_interface.py | 56 +- test/onair/src/reasoning/test_agent.py | 148 ++- .../test_complex_resoning_interface.py | 56 +- test/onair/src/reasoning/test_diagnosis.py | 155 ++- .../src/run_scripts/test_execution_engine.py | 735 +++++++------ test/onair/src/run_scripts/test_sim.py | 193 ++-- test/onair/src/systems/test_status.py | 60 +- .../src/systems/test_telemetry_test_suite.py | 993 ++++++++++++------ test/onair/src/systems/test_vehicle_rep.py | 178 +++- test/onair/src/util/test_cleanup.py | 44 +- test/onair/src/util/test_data_conversion.py | 17 +- test/onair/src/util/test_file_io.py | 452 ++++---- test/onair/src/util/test_plugin_import.py | 256 +++-- test/onair/src/util/test_print_io.py | 860 ++++++++------- test/onair/src/util/test_sim_io.py | 637 ++++++----- test/plugins/generic/test_generic_plugin.py | 2 + test/plugins/kalman/test_kalman_plugin.py | 456 +++++--- test/plugins/reporter/test_reporter_plugin.py | 82 +- test/test_driver.py | 8 +- 61 files changed, 5552 insertions(+), 3115 deletions(-) create mode 100644 Pipfile create mode 100644 Pipfile.lock diff --git a/.gitignore b/.gitignore index 3d0969cd..f48b3919 100644 --- a/.gitignore +++ b/.gitignore @@ -1,5 +1,132 @@ -*.pyc -.DS_Store +# Byte-compiled / optimized / DLL files __pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ 
+share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ .coverage -.vscode/ \ No newline at end of file +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# Visual Studio Code +.vscode/ diff --git a/Pipfile b/Pipfile new file mode 100644 index 00000000..d51dc905 --- /dev/null +++ b/Pipfile @@ -0,0 +1,18 @@ +[[source]] +url = "https://pypi.org/simple" +verify_ssl = true +name = "pypi" + +[packages] +numpy = "*" +redis = "*" + +[dev-packages] +pytest = "*" +pytest-mock = "*" +pytest-randomly = "*" +coverage = "*" +black = "*" + +[requires] +python_version = "3.12" diff --git a/Pipfile.lock b/Pipfile.lock new file mode 100644 index 00000000..7a113eb0 --- /dev/null +++ b/Pipfile.lock @@ -0,0 +1,262 @@ +{ + "_meta": { + "hash": { + "sha256": "2182da2a7c777fe9f3cf8284450b6939956bc0bad902225ca14b0595389a8546" + }, + "pipfile-spec": 6, + "requires": { + "python_version": "3.12" + }, + "sources": [ + { + "name": "pypi", + "url": "https://pypi.org/simple", + "verify_ssl": true + } + ] + }, + "default": { + "numpy": { + "hashes": [ + "sha256:04494f6ec467ccb5369d1808570ae55f6ed9b5809d7f035059000a37b8d7e86f", + "sha256:0a43f0974d501842866cc83471bdb0116ba0dffdbaac33ec05e6afed5b615238", + "sha256:0e50842b2295ba8414c8c1d9d957083d5dfe9e16828b37de883f51fc53c4016f", + "sha256:0ec84b9ba0654f3b962802edc91424331f423dcf5d5f926676e0150789cb3d95", + "sha256:17067d097ed036636fa79f6a869ac26df7db1ba22039d962422506640314933a", + "sha256:1cde1753efe513705a0c6d28f5884e22bdc30438bf0085c5c486cdaff40cd67a", + "sha256:1e72728e7501a450288fc8e1f9ebc73d90cfd4671ebbd631f3e7857c39bd16f2", + "sha256:2635dbd200c2d6faf2ef9a0d04f0ecc6b13b3cad54f7c67c61155138835515d2", + "sha256:2ce46fd0b8a0c947ae047d222f7136fc4d55538741373107574271bc00e20e8f", + "sha256:34f003cb88b1ba38cb9a9a4a3161c1604973d7f9d5552c38bc2f04f829536609", + "sha256:354f373279768fa5a584bac997de6a6c9bc535c482592d7a813bb0c09be6c76f", + "sha256:38ecb5b0582cd125f67a629072fed6f83562d9dd04d7e03256c9829bdec027ad", + 
"sha256:3e8e01233d57639b2e30966c63d36fcea099d17c53bf424d77f088b0f4babd86", + "sha256:3f6bed7f840d44c08ebdb73b1825282b801799e325bcbdfa6bc5c370e5aecc65", + "sha256:4554eb96f0fd263041baf16cf0881b3f5dafae7a59b1049acb9540c4d57bc8cb", + "sha256:46e161722e0f619749d1cd892167039015b2c2817296104487cd03ed4a955995", + "sha256:49d9f7d256fbc804391a7f72d4a617302b1afac1112fac19b6c6cec63fe7fe8a", + "sha256:4d2f62e55a4cd9c58c1d9a1c9edaedcd857a73cb6fda875bf79093f9d9086f85", + "sha256:5f64641b42b2429f56ee08b4f427a4d2daf916ec59686061de751a55aafa22e4", + "sha256:63b92c512d9dbcc37f9d81b123dec99fdb318ba38c8059afc78086fe73820275", + "sha256:6d7696c615765091cc5093f76fd1fa069870304beaccfd58b5dcc69e55ef49c1", + "sha256:79e843d186c8fb1b102bef3e2bc35ef81160ffef3194646a7fdd6a73c6b97196", + "sha256:821eedb7165ead9eebdb569986968b541f9908979c2da8a4967ecac4439bae3d", + "sha256:84554fc53daa8f6abf8e8a66e076aff6ece62de68523d9f665f32d2fc50fd66e", + "sha256:8d83bb187fb647643bd56e1ae43f273c7f4dbcdf94550d7938cfc32566756514", + "sha256:903703372d46bce88b6920a0cd86c3ad82dae2dbef157b5fc01b70ea1cfc430f", + "sha256:9416a5c2e92ace094e9f0082c5fd473502c91651fb896bc17690d6fc475128d6", + "sha256:9a1712c015831da583b21c5bfe15e8684137097969c6d22e8316ba66b5baabe4", + "sha256:9c27f0946a3536403efb0e1c28def1ae6730a72cd0d5878db38824855e3afc44", + "sha256:a356364941fb0593bb899a1076b92dfa2029f6f5b8ba88a14fd0984aaf76d0df", + "sha256:a7039a136017eaa92c1848152827e1424701532ca8e8967fe480fe1569dae581", + "sha256:acd3a644e4807e73b4e1867b769fbf1ce8c5d80e7caaef0d90dcdc640dfc9787", + "sha256:ad0c86f3455fbd0de6c31a3056eb822fc939f81b1618f10ff3406971893b62a5", + "sha256:b4c76e3d4c56f145d41b7b6751255feefae92edbc9a61e1758a98204200f30fc", + "sha256:b6f6a8f45d0313db07d6d1d37bd0b112f887e1369758a5419c0370ba915b3871", + "sha256:c5a59996dc61835133b56a32ebe4ef3740ea5bc19b3983ac60cc32be5a665d54", + "sha256:c73aafd1afca80afecb22718f8700b40ac7cab927b8abab3c3e337d70e10e5a2", + "sha256:cee6cc0584f71adefe2c908856ccc98702baf95ff80092e4ca46061538a2ba98", + "sha256:cef04d068f5fb0518a77857953193b6bb94809a806bd0a14983a8f12ada060c9", + "sha256:cf5d1c9e6837f8af9f92b6bd3e86d513cdc11f60fd62185cc49ec7d1aba34864", + "sha256:e61155fae27570692ad1d327e81c6cf27d535a5d7ef97648a17d922224b216de", + "sha256:e7f387600d424f91576af20518334df3d97bc76a300a755f9a8d6e4f5cadd289", + "sha256:ed08d2703b5972ec736451b818c2eb9da80d66c3e84aed1deeb0c345fefe461b", + "sha256:fbd6acc766814ea6443628f4e6751d0da6593dae29c08c0b2606164db026970c", + "sha256:feff59f27338135776f6d4e2ec7aeeac5d5f7a08a83e80869121ef8164b74af9" + ], + "index": "pypi", + "markers": "python_version >= '3.9'", + "version": "==2.0.0" + }, + "redis": { + "hashes": [ + "sha256:38473cd7c6389ad3e44a91f4c3eaf6bcb8a9f746007f29bf4fb20824ff0b2197", + "sha256:c0d6d990850c627bbf7be01c5c4cbaadf67b48593e913bb71c9819c30df37eee" + ], + "index": "pypi", + "markers": "python_version >= '3.7'", + "version": "==5.0.6" + } + }, + "develop": { + "black": { + "hashes": [ + "sha256:257d724c2c9b1660f353b36c802ccece186a30accc7742c176d29c146df6e474", + "sha256:37aae07b029fa0174d39daf02748b379399b909652a806e5708199bd93899da1", + "sha256:415e686e87dbbe6f4cd5ef0fbf764af7b89f9057b97c908742b6008cc554b9c0", + "sha256:48a85f2cb5e6799a9ef05347b476cce6c182d6c71ee36925a6c194d074336ef8", + "sha256:7768a0dbf16a39aa5e9a3ded568bb545c8c2727396d063bbaf847df05b08cd96", + "sha256:7e122b1c4fb252fd85df3ca93578732b4749d9be076593076ef4d07a0233c3e1", + "sha256:88c57dc656038f1ab9f92b3eb5335ee9b021412feaa46330d5eba4e51fe49b04", + 
"sha256:8e537d281831ad0e71007dcdcbe50a71470b978c453fa41ce77186bbe0ed6021", + "sha256:98e123f1d5cfd42f886624d84464f7756f60ff6eab89ae845210631714f6db94", + "sha256:accf49e151c8ed2c0cdc528691838afd217c50412534e876a19270fea1e28e2d", + "sha256:b1530ae42e9d6d5b670a34db49a94115a64596bc77710b1d05e9801e62ca0a7c", + "sha256:b9176b9832e84308818a99a561e90aa479e73c523b3f77afd07913380ae2eab7", + "sha256:bdde6f877a18f24844e381d45e9947a49e97933573ac9d4345399be37621e26c", + "sha256:be8bef99eb46d5021bf053114442914baeb3649a89dc5f3a555c88737e5e98fc", + "sha256:bf10f7310db693bb62692609b397e8d67257c55f949abde4c67f9cc574492cc7", + "sha256:c872b53057f000085da66a19c55d68f6f8ddcac2642392ad3a355878406fbd4d", + "sha256:d36ed1124bb81b32f8614555b34cc4259c3fbc7eec17870e8ff8ded335b58d8c", + "sha256:da33a1a5e49c4122ccdfd56cd021ff1ebc4a1ec4e2d01594fef9b6f267a9e741", + "sha256:dd1b5a14e417189db4c7b64a6540f31730713d173f0b63e55fabd52d61d8fdce", + "sha256:e151054aa00bad1f4e1f04919542885f89f5f7d086b8a59e5000e6c616896ffb", + "sha256:eaea3008c281f1038edb473c1aa8ed8143a5535ff18f978a318f10302b254063", + "sha256:ef703f83fc32e131e9bcc0a5094cfe85599e7109f896fe8bc96cc402f3eb4b6e" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==24.4.2" + }, + "click": { + "hashes": [ + "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28", + "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de" + ], + "markers": "python_version >= '3.7'", + "version": "==8.1.7" + }, + "colorama": { + "hashes": [ + "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", + "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6" + ], + "markers": "sys_platform == 'win32'", + "version": "==0.4.6" + }, + "coverage": { + "hashes": [ + "sha256:015eddc5ccd5364dcb902eaecf9515636806fa1e0d5bef5769d06d0f31b54523", + "sha256:04aefca5190d1dc7a53a4c1a5a7f8568811306d7a8ee231c42fb69215571944f", + "sha256:05ac5f60faa0c704c0f7e6a5cbfd6f02101ed05e0aee4d2822637a9e672c998d", + "sha256:0bbddc54bbacfc09b3edaec644d4ac90c08ee8ed4844b0f86227dcda2d428fcb", + "sha256:1d2a830ade66d3563bb61d1e3c77c8def97b30ed91e166c67d0632c018f380f0", + "sha256:239a4e75e09c2b12ea478d28815acf83334d32e722e7433471fbf641c606344c", + "sha256:244f509f126dc71369393ce5fea17c0592c40ee44e607b6d855e9c4ac57aac98", + "sha256:25a5caf742c6195e08002d3b6c2dd6947e50efc5fc2c2205f61ecb47592d2d83", + "sha256:296a7d9bbc598e8744c00f7a6cecf1da9b30ae9ad51c566291ff1314e6cbbed8", + "sha256:2e079c9ec772fedbade9d7ebc36202a1d9ef7291bc9b3a024ca395c4d52853d7", + "sha256:33ca90a0eb29225f195e30684ba4a6db05dbef03c2ccd50b9077714c48153cac", + "sha256:33fc65740267222fc02975c061eb7167185fef4cc8f2770267ee8bf7d6a42f84", + "sha256:341dd8f61c26337c37988345ca5c8ccabeff33093a26953a1ac72e7d0103c4fb", + "sha256:34d6d21d8795a97b14d503dcaf74226ae51eb1f2bd41015d3ef332a24d0a17b3", + "sha256:3538d8fb1ee9bdd2e2692b3b18c22bb1c19ffbefd06880f5ac496e42d7bb3884", + "sha256:38a3b98dae8a7c9057bd91fbf3415c05e700a5114c5f1b5b0ea5f8f429ba6614", + "sha256:3d5a67f0da401e105753d474369ab034c7bae51a4c31c77d94030d59e41df5bd", + "sha256:50084d3516aa263791198913a17354bd1dc627d3c1639209640b9cac3fef5807", + "sha256:55f689f846661e3f26efa535071775d0483388a1ccfab899df72924805e9e7cd", + "sha256:5bc5a8c87714b0c67cfeb4c7caa82b2d71e8864d1a46aa990b5588fa953673b8", + "sha256:62bda40da1e68898186f274f832ef3e759ce929da9a9fd9fcf265956de269dbc", + "sha256:705f3d7c2b098c40f5b81790a5fedb274113373d4d1a69e65f8b68b0cc26f6db", + "sha256:75e3f4e86804023e991096b29e147e635f5e2568f77883a1e6eed74512659ab0", 
+ "sha256:7b2a19e13dfb5c8e145c7a6ea959485ee8e2204699903c88c7d25283584bfc08", + "sha256:7cec2af81f9e7569280822be68bd57e51b86d42e59ea30d10ebdbb22d2cb7232", + "sha256:8383a6c8cefba1b7cecc0149415046b6fc38836295bc4c84e820872eb5478b3d", + "sha256:8c836309931839cca658a78a888dab9676b5c988d0dd34ca247f5f3e679f4e7a", + "sha256:8e317953bb4c074c06c798a11dbdd2cf9979dbcaa8ccc0fa4701d80042d4ebf1", + "sha256:923b7b1c717bd0f0f92d862d1ff51d9b2b55dbbd133e05680204465f454bb286", + "sha256:990fb20b32990b2ce2c5f974c3e738c9358b2735bc05075d50a6f36721b8f303", + "sha256:9aad68c3f2566dfae84bf46295a79e79d904e1c21ccfc66de88cd446f8686341", + "sha256:a5812840d1d00eafae6585aba38021f90a705a25b8216ec7f66aebe5b619fb84", + "sha256:a6519d917abb15e12380406d721e37613e2a67d166f9fb7e5a8ce0375744cd45", + "sha256:ab0b028165eea880af12f66086694768f2c3139b2c31ad5e032c8edbafca6ffc", + "sha256:aea7da970f1feccf48be7335f8b2ca64baf9b589d79e05b9397a06696ce1a1ec", + "sha256:b1196e13c45e327d6cd0b6e471530a1882f1017eb83c6229fc613cd1a11b53cd", + "sha256:b368e1aee1b9b75757942d44d7598dcd22a9dbb126affcbba82d15917f0cc155", + "sha256:bde997cac85fcac227b27d4fb2c7608a2c5f6558469b0eb704c5726ae49e1c52", + "sha256:c4c2872b3c91f9baa836147ca33650dc5c172e9273c808c3c3199c75490e709d", + "sha256:c59d2ad092dc0551d9f79d9d44d005c945ba95832a6798f98f9216ede3d5f485", + "sha256:d1da0a2e3b37b745a2b2a678a4c796462cf753aebf94edcc87dcc6b8641eae31", + "sha256:d8b7339180d00de83e930358223c617cc343dd08e1aa5ec7b06c3a121aec4e1d", + "sha256:dd4b3355b01273a56b20c219e74e7549e14370b31a4ffe42706a8cda91f19f6d", + "sha256:e08c470c2eb01977d221fd87495b44867a56d4d594f43739a8028f8646a51e0d", + "sha256:f5102a92855d518b0996eb197772f5ac2a527c0ec617124ad5242a3af5e25f85", + "sha256:f542287b1489c7a860d43a7d8883e27ca62ab84ca53c965d11dac1d3a1fab7ce", + "sha256:f78300789a708ac1f17e134593f577407d52d0417305435b134805c4fb135adb", + "sha256:f81bc26d609bf0fbc622c7122ba6307993c83c795d2d6f6f6fd8c000a770d974", + "sha256:f836c174c3a7f639bded48ec913f348c4761cbf49de4a20a956d3431a7c9cb24", + "sha256:fa21a04112c59ad54f69d80e376f7f9d0f5f9123ab87ecd18fbb9ec3a2beed56", + "sha256:fcf7d1d6f5da887ca04302db8e0e0cf56ce9a5e05f202720e49b3e8157ddb9a9", + "sha256:fd27d8b49e574e50caa65196d908f80e4dff64d7e592d0c59788b45aad7e8b35" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==7.5.3" + }, + "iniconfig": { + "hashes": [ + "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3", + "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374" + ], + "markers": "python_version >= '3.7'", + "version": "==2.0.0" + }, + "mypy-extensions": { + "hashes": [ + "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d", + "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782" + ], + "markers": "python_version >= '3.5'", + "version": "==1.0.0" + }, + "packaging": { + "hashes": [ + "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002", + "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124" + ], + "markers": "python_version >= '3.8'", + "version": "==24.1" + }, + "pathspec": { + "hashes": [ + "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08", + "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712" + ], + "markers": "python_version >= '3.8'", + "version": "==0.12.1" + }, + "platformdirs": { + "hashes": [ + "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee", + "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3" + ], + 
"markers": "python_version >= '3.8'", + "version": "==4.2.2" + }, + "pluggy": { + "hashes": [ + "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1", + "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669" + ], + "markers": "python_version >= '3.8'", + "version": "==1.5.0" + }, + "pytest": { + "hashes": [ + "sha256:c434598117762e2bd304e526244f67bf66bbd7b5d6cf22138be51ff661980343", + "sha256:de4bb8104e201939ccdc688b27a89a7be2079b22e2bd2b07f806b6ba71117977" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==8.2.2" + }, + "pytest-mock": { + "hashes": [ + "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f", + "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==3.14.0" + }, + "pytest-randomly": { + "hashes": [ + "sha256:0516f4344b29f4e9cdae8bce31c4aeebf59d0b9ef05927c33354ff3859eeeca6", + "sha256:b908529648667ba5e54723088edd6f82252f540cc340d748d1fa985539687047" + ], + "index": "pypi", + "markers": "python_version >= '3.8'", + "version": "==3.15.0" + } + } +} diff --git a/conftest.py b/conftest.py index ec4029ad..f6c82c65 100644 --- a/conftest.py +++ b/conftest.py @@ -13,32 +13,34 @@ from unittest.mock import MagicMock import sys + def pytest_addoption(parser): - parser.addoption("--conftest-seed", action="store", type=int, default=None) + parser.addoption("--conftest-seed", action="store", type=int, default=None) + def pytest_configure(config): - seed = config.getoption("--conftest-seed") - if config.getoption("--conftest-seed") == None: - seed = int(time()) - pytest.gen = random.Random(seed) - print(f"Using --conftest-seed={seed}") - - # Mock simdkalman for kalman_plugin testing - simdkalman = MagicMock() - sys.modules['simdkalman'] = simdkalman - - # Mock sbn_client for sbn_adapter testing - sc = MagicMock() - sys.modules['sbn_client'] = sc - - # Mock message_headers for sbn_adapter testing - mh = MagicMock() - mh.sample_data_tlm_t = MagicMock() - mh.sample_data_tlm_t.__name__ = 'mock_sample_data_tlm_t' - mh.sample_data_power_t = MagicMock() - mh.sample_data_power_t.__name__ = 'mock_sample_data_power_t' - mh.sample_data_thermal_t = MagicMock() - mh.sample_data_thermal_t.__name__ = 'mock_sample_data_thermal_t' - mh.sample_data_gps_t = MagicMock() - mh.sample_data_gps_t.__name__ = 'mock_sample_data_gps_t' - sys.modules['message_headers'] = mh + seed = config.getoption("--conftest-seed") + if config.getoption("--conftest-seed") == None: + seed = int(time()) + pytest.gen = random.Random(seed) + print(f"Using --conftest-seed={seed}") + + # Mock simdkalman for kalman_plugin testing + simdkalman = MagicMock() + sys.modules["simdkalman"] = simdkalman + + # Mock sbn_client for sbn_adapter testing + sc = MagicMock() + sys.modules["sbn_client"] = sc + + # Mock message_headers for sbn_adapter testing + mh = MagicMock() + mh.sample_data_tlm_t = MagicMock() + mh.sample_data_tlm_t.__name__ = "mock_sample_data_tlm_t" + mh.sample_data_power_t = MagicMock() + mh.sample_data_power_t.__name__ = "mock_sample_data_power_t" + mh.sample_data_thermal_t = MagicMock() + mh.sample_data_thermal_t.__name__ = "mock_sample_data_thermal_t" + mh.sample_data_gps_t = MagicMock() + mh.sample_data_gps_t.__name__ = "mock_sample_data_gps_t" + sys.modules["message_headers"] = mh diff --git a/driver.py b/driver.py index 5e2fc7a3..9f970cea 100644 --- a/driver.py +++ b/driver.py @@ -27,34 +27,53 @@ def main(): Check the .ini file for the 
filenames used """ - arg_parser = argparse.ArgumentParser(description='') - arg_parser.add_argument('configfile', nargs='?', - default='./onair/config/default_config.ini', - help='Config file to be used') - arg_parser.add_argument('--save', '-s', action='store_true', - help='Should log files be saved?') - arg_parser.add_argument('--save_name', '--name', '-n', - help='Name of saved log files') - arg_parser.add_argument('--mute', '-m', action='store_true', - help='Mute all non-error output') + arg_parser = argparse.ArgumentParser(description="") + arg_parser.add_argument( + "configfile", + nargs="?", + default="./onair/config/default_config.ini", + help="Config file to be used", + ) + arg_parser.add_argument( + "--save", "-s", action="store_true", help="Should log files be saved?" + ) + arg_parser.add_argument( + "--save_name", "--name", "-n", help="Name of saved log files" + ) + arg_parser.add_argument( + "--mute", "-m", action="store_true", help="Mute all non-error output" + ) """ Testing specific arguments """ - arg_parser.add_argument('--test', '-t', action='store_true', - help='Run tests') - arg_parser.add_argument('--verbose', '-v', action='count', default=0, - help="Increase verbosity in tests") - arg_parser.add_argument('-k', action='store', dest='keyword', default="", - metavar='EXPRESSION', - help="Pass thru for pytest's -k option. Runs only" - " tests with names that match EXPRESSION.") - arg_parser.add_argument('--conftest-seed', action='store', - type=int, default=None, - help="Set the random seed for test values") - arg_parser.add_argument('--randomly-seed', action='store', - type=int, default=None, - help="Set the random seed for test run order") + arg_parser.add_argument("--test", "-t", action="store_true", help="Run tests") + arg_parser.add_argument( + "--verbose", "-v", action="count", default=0, help="Increase verbosity in tests" + ) + arg_parser.add_argument( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Pass thru for pytest's -k option. 
Runs only" + " tests with names that match EXPRESSION.", + ) + arg_parser.add_argument( + "--conftest-seed", + action="store", + type=int, + default=None, + help="Set the random seed for test values", + ) + arg_parser.add_argument( + "--randomly-seed", + action="store", + type=int, + default=None, + help="Set the random seed for test run order", + ) args = arg_parser.parse_args() """ @@ -63,7 +82,8 @@ def main(): """ if args.test: import coverage - cov = coverage.Coverage(source=['onair', 'plugins'], branch=True) + + cov = coverage.Coverage(source=["onair", "plugins"], branch=True) cov.start() """ @@ -80,10 +100,11 @@ def main(): """ Runs all unit tests """ if args.test: import pytest + test_directory_name = "test" pytest_args = [test_directory_name] - pytest_args.extend(['-v'] * args.verbose) + pytest_args.extend(["-v"] * args.verbose) if args.conftest_seed: pytest_args.extend([f"--conftest-seed={args.conftest_seed}"]) if args.randomly_seed: @@ -95,7 +116,7 @@ def main(): cov.save() cov.html_report() else: - setup_folders(os.environ['RESULTS_PATH']) + setup_folders(os.environ["RESULTS_PATH"]) if args.save_name: save_name = args.save_name else: @@ -108,26 +129,28 @@ def init_global_paths(test=False): """ Initializes global paths, used throughout execution """ - run_path = 'onair/src/test' if test else './' - results_path = 'onair/src/test/results' if test else 'results/' + run_path = "onair/src/test" if test else "./" + results_path = "onair/src/test/results" if test else "results/" - os.environ['BASE_PATH'] = os.path.dirname(os.path.realpath(__file__)) - os.environ['RUN_PATH'] = os.path.join(os.path.dirname( - os.path.realpath(__file__)), run_path) - os.environ['RESULTS_PATH'] = os.path.join(os.path.dirname( - os.path.realpath(__file__)), results_path) - os.environ['SRC_ROOT_PATH'] = os.path.dirname(os.path.realpath(__file__)) + os.environ["BASE_PATH"] = os.path.dirname(os.path.realpath(__file__)) + os.environ["RUN_PATH"] = os.path.join( + os.path.dirname(os.path.realpath(__file__)), run_path + ) + os.environ["RESULTS_PATH"] = os.path.join( + os.path.dirname(os.path.realpath(__file__)), results_path + ) + os.environ["SRC_ROOT_PATH"] = os.path.dirname(os.path.realpath(__file__)) def blockPrint(): - """ Disable terminal output """ - sys.stdout = open(os.devnull, 'w') + """Disable terminal output""" + sys.stdout = open(os.devnull, "w") def enablePrint(): - """ Restore terminal output """ + """Restore terminal output""" sys.stdout = sys.__stdout__ -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/environment.yml b/environment.yml index cc70880e..a75bfd51 100644 --- a/environment.yml +++ b/environment.yml @@ -6,6 +6,7 @@ dependencies: - python>=3.8,<3.13 - numpy - coverage + - black - pytest - pytest-mock - pytest-randomly diff --git a/onair/config/default_config.ini b/onair/config/default_config.ini index c107ab0c..c5e4a922 100644 --- a/onair/config/default_config.ini +++ b/onair/config/default_config.ini @@ -27,7 +27,7 @@ PlannersPluginDict = {'generic':'plugins/generic/__init__.py'} # Required Key: ComplexPluginDict(s) are used by Agent for complex reasoning ComplexPluginDict = {'generic':'plugins/generic/__init__.py'} -# Required Section: OPTIONS are settable values to change running experience +# Optional Section: OPTIONS are settable values to change running experience [OPTIONS] # Optional Key: IO_Flag denotes whether or not to provide console output # default = false diff --git a/onair/config/reporter_config.ini b/onair/config/reporter_config.ini index 
65b3a9bc..8eac4a47 100644 --- a/onair/config/reporter_config.ini +++ b/onair/config/reporter_config.ini @@ -19,5 +19,3 @@ ComplexPluginDict = {'Complex Reporter 1':'plugins/reporter', [OPTIONS] IO_Enabled = true -Dev_Enabled = false -Viz_Enabled = false diff --git a/onair/data_handling/csv_parser.py b/onair/data_handling/csv_parser.py index cc3114d7..8ac79cb7 100644 --- a/onair/data_handling/csv_parser.py +++ b/onair/data_handling/csv_parser.py @@ -18,22 +18,23 @@ from onair.src.util.print_io import * from onair.data_handling.parser_util import * + class DataSource(OnAirDataSource): def process_data_file(self, data_file): self.sim_data = self.parse_csv_data(data_file) self.frame_index = 0 -##### INITIAL PROCESSING #### + ##### INITIAL PROCESSING #### def parse_csv_data(self, data_file): - #Read in the data set + # Read in the data set all_data = [] - with open(data_file, 'r', newline='') as csv_file: - dataset = csv.reader(csv_file, delimiter=',') + with open(data_file, "r", newline="") as csv_file: + dataset = csv.reader(csv_file, delimiter=",") - #Initialize the entire data dictionary + # Initialize the entire data dictionary index = 0 for row in dataset: if index == 0: @@ -49,10 +50,10 @@ def parse_csv_data(self, data_file): def parse_meta_data_file(self, meta_data_file, ss_breakdown): return extract_meta_data_handle_ss_breakdown(meta_data_file, ss_breakdown) -##### GETTERS ################################## + ##### GETTERS ################################## def get_vehicle_metadata(self): - return self.all_headers, self.binning_configs['test_assignments'] + return self.all_headers, self.binning_configs["test_assignments"] # Get the data at self.index and increment the index def get_next(self): diff --git a/onair/data_handling/on_air_data_source.py b/onair/data_handling/on_air_data_source.py index edd60066..d0f6e8c4 100644 --- a/onair/data_handling/on_air_data_source.py +++ b/onair/data_handling/on_air_data_source.py @@ -10,14 +10,16 @@ from abc import ABC, abstractmethod from .parser_util import * + class ConfigKeyError(KeyError): pass + class OnAirDataSource(ABC): - def __init__(self, data_file, meta_file, ss_breakdown = False): + def __init__(self, data_file, meta_file, ss_breakdown=False): """An initial parsing needs to happen in order to use the parser classes - This means that, if you want to use this class to parse in real time, - it needs to at least have seen one sample of the anticipated format """ + This means that, if you want to use this class to parse in real time, + it needs to at least have seen one sample of the anticipated format""" self.raw_data_file = data_file self.meta_data_file = meta_file @@ -27,10 +29,12 @@ def __init__(self, data_file, meta_file, ss_breakdown = False): self.binning_configs = {} configs = self.parse_meta_data_file(self.meta_data_file, ss_breakdown) - self.binning_configs['subsystem_assignments'] = configs['subsystem_assignments'] - self.binning_configs['test_assignments'] = configs['test_assignments'] - self.binning_configs['description_assignments'] = configs['description_assignments'] - self.all_headers = configs['data_labels'] + self.binning_configs["subsystem_assignments"] = configs["subsystem_assignments"] + self.binning_configs["test_assignments"] = configs["test_assignments"] + self.binning_configs["description_assignments"] = configs[ + "description_assignments" + ] + self.all_headers = configs["data_labels"] self.process_data_file(self.raw_data_file) diff --git a/onair/data_handling/parser_util.py b/onair/data_handling/parser_util.py 
index d8639498..3922d018 100644 --- a/onair/data_handling/parser_util.py +++ b/onair/data_handling/parser_util.py @@ -11,28 +11,32 @@ from .tlm_json_parser import parseTlmConfJson, str2lst import datetime + def extract_meta_data_handle_ss_breakdown(meta_data_file, ss_breakdown): parsed_meta_data = extract_meta_data(meta_data_file) if ss_breakdown == False: - num_elements = len(parsed_meta_data['subsystem_assignments']) - parsed_meta_data['subsystem_assignments'] = [['MISSION'] for elem in range(num_elements)] + num_elements = len(parsed_meta_data["subsystem_assignments"]) + parsed_meta_data["subsystem_assignments"] = [ + ["MISSION"] for elem in range(num_elements) + ] return parsed_meta_data + ## Method to extract configuration data and return 3 dictionaries def extract_meta_data(meta_data_file): - assert meta_data_file != '' + assert meta_data_file != "" configs = parseTlmConfJson(meta_data_file) - configs_len = len(configs['subsystem_assignments']) + configs_len = len(configs["subsystem_assignments"]) for i in range(configs_len): - if configs['subsystem_assignments'][i] != 'NONE': - configs['subsystem_assignments'][i] = [configs['subsystem_assignments'][i]] + if configs["subsystem_assignments"][i] != "NONE": + configs["subsystem_assignments"][i] = [configs["subsystem_assignments"][i]] else: - configs['subsystem_assignments'][i] = [] + configs["subsystem_assignments"][i] = [] - test_assign = configs['test_assignments'][i] + test_assign = configs["test_assignments"][i] for j in range(len(test_assign)): if len(test_assign[j]) > 1: @@ -42,6 +46,7 @@ def extract_meta_data(meta_data_file): return configs + def floatify_input(_input, remove_str=False): floatified = [] for i in _input: @@ -60,12 +65,13 @@ def floatify_input(_input, remove_str=False): continue return floatified + def convert_str_to_timestamp(time_str): try: - t = datetime.datetime.strptime(time_str, '%Y-%j-%H:%M:%S.%f') + t = datetime.datetime.strptime(time_str, "%Y-%j-%H:%M:%S.%f") return t.timestamp() except: - min_sec = time_str.split(':') + min_sec = time_str.split(":") # Use 1 am on Jan 1st, 2000 as the date if only minutes and seconds are specified t = datetime.datetime(2000, 1, 1, 1, int(min_sec[0]), int(min_sec[1]), 0) return t.timestamp() diff --git a/onair/data_handling/redis_adapter.py b/onair/data_handling/redis_adapter.py index ba346004..23179b2c 100644 --- a/onair/data_handling/redis_adapter.py +++ b/onair/data_handling/redis_adapter.py @@ -25,32 +25,35 @@ from onair.src.util.print_io import * from onair.data_handling.parser_util import * + class DataSource(OnAirDataSource): - def __init__(self, data_file, meta_file, ss_breakdown = False): + def __init__(self, data_file, meta_file, ss_breakdown=False): super().__init__(data_file, meta_file, ss_breakdown) - self.address = 'localhost' + self.address = "localhost" self.port = 6379 self.db = 0 self.server = None self.new_data_lock = threading.Lock() self.new_data = False self.currentData = [] - self.currentData.append({'headers':self.order, - 'data':list('-' * len(self.order))}) - self.currentData.append({'headers':self.order, - 'data':list('-' * len(self.order))}) + self.currentData.append( + {"headers": self.order, "data": list("-" * len(self.order))} + ) + self.currentData.append( + {"headers": self.order, "data": list("-" * len(self.order))} + ) self.double_buffer_read_index = 0 self.connect() self.subscribe(self.subscriptions) def connect(self): """Establish connection to REDIS server.""" - print_msg('Redis adapter connecting to server...') + print_msg("Redis 
adapter connecting to server...") self.server = redis.Redis(self.address, self.port, self.db) if self.server.ping(): - print_msg('... connected!') + print_msg("... connected!") def subscribe(self, subscriptions): """Subscribe to REDIS message channel(s) and launch listener thread.""" @@ -67,19 +70,19 @@ def subscribe(self, subscriptions): print_msg(f"No subscriptions given!") def parse_meta_data_file(self, meta_data_file, ss_breakdown): - configs = extract_meta_data_handle_ss_breakdown( - meta_data_file, ss_breakdown) + configs = extract_meta_data_handle_ss_breakdown(meta_data_file, ss_breakdown) meta = parseJson(meta_data_file) keys = meta.keys() - if 'order' in keys: - self.order = meta['order'] + if "order" in keys: + self.order = meta["order"] else: - raise ConfigKeyError(f'Config file: \'{meta_data_file}\' ' \ - 'missing required key \'order\'') + raise ConfigKeyError( + f"Config file: '{meta_data_file}' " "missing required key 'order'" + ) - if 'redis_subscriptions' in meta.keys(): - self.subscriptions = meta['redis_subscriptions'] + if "redis_subscriptions" in meta.keys(): + self.subscriptions = meta["redis_subscriptions"] else: self.subscriptions = [] @@ -89,7 +92,7 @@ def process_data_file(self, data_file): print("Redis Adapter ignoring file") def get_vehicle_metadata(self): - return self.all_headers, self.binning_configs['test_assignments'] + return self.all_headers, self.binning_configs["test_assignments"] def get_next(self): """Provides the latest data from REDIS channel""" @@ -105,11 +108,10 @@ def get_next(self): read_index = 0 with self.new_data_lock: self.new_data = False - self.double_buffer_read_index = ( - self.double_buffer_read_index + 1) % 2 + self.double_buffer_read_index = (self.double_buffer_read_index + 1) % 2 read_index = self.double_buffer_read_index - return self.currentData[read_index]['data'] + return self.currentData[read_index]["data"] def has_more(self): """Live connection should always return True""" @@ -118,59 +120,69 @@ def has_more(self): def message_listener(self): """Loop for listening for messages on channels""" for message in self.pubsub.listen(): - if message['type'] == 'message': + if message["type"] == "message": channel_name = f"{message['channel'].decode()}" # Attempt to load message as json try: - data = json.loads(message['data']) + data = json.loads(message["data"]) except ValueError: # Warn of non-json conforming channel data received - non_json_msg = f'Subscribed channel `{channel_name}\' ' \ - 'message received but is not in json ' \ - f'format.\nMessage:\n{message["data"]}' - print_msg(non_json_msg, ['WARNING']) + non_json_msg = ( + f"Subscribed channel `{channel_name}' " + "message received but is not in json " + f'format.\nMessage:\n{message["data"]}' + ) + print_msg(non_json_msg, ["WARNING"]) continue # Select the current data - currentData = self.currentData[ - (self.double_buffer_read_index + 1) % 2] + currentData = self.currentData[(self.double_buffer_read_index + 1) % 2] # turn all data points to unknown - currentData['data'] = ['-' for _ in currentData['data']] + currentData["data"] = ["-" for _ in currentData["data"]] # Find expected keys for received channel - expected_message_keys = \ - [k for k in currentData['headers'] if channel_name in k] + expected_message_keys = [ + k for k in currentData["headers"] if channel_name in k + ] # Time is an expected key for all channels expected_message_keys.append("time") # Parse through the message keys for data points for key in list(data.keys()): - if key.lower() == 'time': + if 
key.lower() == "time": header_string = key.lower() else: header_string = f"{channel_name}.{key}" # Look for channel specific values try: - index = currentData['headers'].index(header_string) - currentData['data'][index] = data[key] + index = currentData["headers"].index(header_string) + currentData["data"][index] = data[key] expected_message_keys.remove(header_string) # Unexpected key in data except ValueError: # warn user about key in data that is not in header - print_msg(f"Unused key `{key}' in message " \ - f'from channel `{channel_name}.\'', - ['WARNING']) + print_msg( + f"Unused key `{key}' in message " + f"from channel `{channel_name}.'", + ["WARNING"], + ) with self.new_data_lock: self.new_data = True # Warn user about expected keys missing from received data for k in expected_message_keys: - print_msg(f'Message from channel `{channel_name}\' ' \ - f'did not contain `{k}\' key\nMessage:\n' \ - f'{data}', ['WARNING']) + print_msg( + f"Message from channel `{channel_name}' " + f"did not contain `{k}' key\nMessage:\n" + f"{data}", + ["WARNING"], + ) else: # Warn user about non message receipts - print_msg(f"Redis adapter: channel " \ - f"'{message['channel'].decode()}' received " \ - f"message type: {message['type']}.", ['WARNING']) + print_msg( + f"Redis adapter: channel " + f"'{message['channel'].decode()}' received " + f"message type: {message['type']}.", + ["WARNING"], + ) # When listener loop exits warn user - print_msg("Redis subscription listener exited.", ['WARNING']) + print_msg("Redis subscription listener exited.", ["WARNING"]) def has_data(self): return self.new_data diff --git a/onair/data_handling/tlm_json_parser.py b/onair/data_handling/tlm_json_parser.py index 783ef0db..48356bdf 100644 --- a/onair/data_handling/tlm_json_parser.py +++ b/onair/data_handling/tlm_json_parser.py @@ -10,6 +10,7 @@ import ast import json + # parse tlm config json file def parseTlmConfJson(file_path): data = parseJson(file_path) @@ -19,59 +20,67 @@ def parseTlmConfJson(file_path): subsys_assignments = [] mnemonic_tests = [] descriptions = [] - + for label in reorg_data: curr_datapt = reorg_data[label] - subsys = curr_datapt['subsystem'] + subsys = curr_datapt["subsystem"] - tests = curr_datapt['tests'] if 'tests' in curr_datapt else {} + tests = curr_datapt["tests"] if "tests" in curr_datapt else {} if tests == {}: - mnemonics = [['NOOP']] + mnemonics = [["NOOP"]] else: mnemonics = [] for key in tests: - mnemonics.append([key, curr_datapt['tests'][key]]) - desc = curr_datapt['description'] if 'description' in curr_datapt else ['No description'] - + mnemonics.append([key, curr_datapt["tests"][key]]) + desc = ( + curr_datapt["description"] + if "description" in curr_datapt + else ["No description"] + ) + labels.append(label) subsys_assignments.append(subsys) mnemonic_tests.append(mnemonics) descriptions.append(desc) # if given an order, reorder data to match - if 'order' in data and data['order'] != []: + if "order" in data and data["order"] != []: original_order = {} - for i in range(len(data['order'])): - original_order[data['order'][i]] = i + for i in range(len(data["order"])): + original_order[data["order"][i]] = i ordering_list = [] for label in labels: ordering_list.append(original_order[label]) labels = [y for x, y in sorted(zip(ordering_list, labels))] - subsys_assignments = [y for x, y in sorted(zip(ordering_list, subsys_assignments))] + subsys_assignments = [ + y for x, y in sorted(zip(ordering_list, subsys_assignments)) + ] mnemonic_tests = [y for x, y in sorted(zip(ordering_list, 
mnemonic_tests))] descriptions = [y for x, y in sorted(zip(ordering_list, descriptions))] configs = {} - configs['subsystem_assignments'] = subsys_assignments - configs['test_assignments'] = mnemonic_tests - configs['description_assignments'] = descriptions - configs['data_labels'] = labels - + configs["subsystem_assignments"] = subsys_assignments + configs["test_assignments"] = mnemonic_tests + configs["description_assignments"] = descriptions + configs["data_labels"] = labels + return configs + # process tlm dict into dict of labels and their attributes def reorganizeTlmDict(data): processed_data = {} - - for s in data['subsystems']: - for label in data['subsystems'][s]: - processed_data[label] = data['subsystems'][s][label] - processed_data[label]['subsystem'] = s - + + for s in data["subsystems"]: + for label in data["subsystems"][s]: + processed_data[label] = data["subsystems"][s][label] + processed_data[label]["subsystem"] = s + return processed_data + def str2lst(string): try: return ast.literal_eval(string) @@ -79,8 +88,9 @@ def str2lst(string): print("Unable to process string representation of list") # return string + def parseJson(path): - file = open(path, 'rb') + file = open(path, "rb") file_str = file.read() data = json.loads(file_str) diff --git a/onair/src/ai_components/ai_plugin_abstract/__init__.py b/onair/src/ai_components/ai_plugin_abstract/__init__.py index 05b7860f..8beae574 100644 --- a/onair/src/ai_components/ai_plugin_abstract/__init__.py +++ b/onair/src/ai_components/ai_plugin_abstract/__init__.py @@ -1 +1 @@ -from .ai_plugin import AIPlugin \ No newline at end of file +from .ai_plugin import AIPlugin diff --git a/onair/src/ai_components/ai_plugin_abstract/ai_plugin.py b/onair/src/ai_components/ai_plugin_abstract/ai_plugin.py index e9acec00..2b203988 100644 --- a/onair/src/ai_components/ai_plugin_abstract/ai_plugin.py +++ b/onair/src/ai_components/ai_plugin_abstract/ai_plugin.py @@ -8,17 +8,20 @@ # See "NOSA GSC-19165-1 OnAIR.pdf" from abc import ABC, abstractmethod + """This object serves as a proxy for all plugins. Therefore, the AIPlugin object is meant to induce standards and structures of compliance for user-created and/or imported plugins/libraries """ + + class AIPlugin(ABC): def __init__(self, _name, _headers): """ Superclass for data driven components: VAE, PPO, etc. Allows for easier modularity. 
""" - assert(len(_headers)>0) + assert len(_headers) > 0 self.component_name = _name self.headers = _headers @@ -35,4 +38,3 @@ def render_reasoning(self): System should return its diagnosis """ raise NotImplementedError - diff --git a/onair/src/ai_components/learners_interface.py b/onair/src/ai_components/learners_interface.py index 07ff0fa9..b9e8b457 100644 --- a/onair/src/ai_components/learners_interface.py +++ b/onair/src/ai_components/learners_interface.py @@ -13,9 +13,10 @@ from ..util.plugin_import import import_plugins from ..util.data_conversion import * + class LearnersInterface: def __init__(self, headers, _learner_plugins={}): - assert(len(headers)>0), 'Headers are required' + assert len(headers) > 0, "Headers are required" self.headers = headers self.learner_constructs = import_plugins(self.headers, _learner_plugins) diff --git a/onair/src/ai_components/planners_interface.py b/onair/src/ai_components/planners_interface.py index 54f670fc..71c29040 100644 --- a/onair/src/ai_components/planners_interface.py +++ b/onair/src/ai_components/planners_interface.py @@ -13,11 +13,12 @@ from ..util.plugin_import import import_plugins from ..util.data_conversion import * + class PlannersInterface: def __init__(self, headers, _planner_plugins={}): - assert(len(headers)>0), 'Headers are required' + assert len(headers) > 0, "Headers are required" self.headers = headers - self.planner_constructs = import_plugins(self.headers,_planner_plugins) + self.planner_constructs = import_plugins(self.headers, _planner_plugins) def update(self, high_level_data): # Raw TLM should be transformed into high-leve state representation here diff --git a/onair/src/reasoning/agent.py b/onair/src/reasoning/agent.py index 165e7e9c..9f911b53 100644 --- a/onair/src/reasoning/agent.py +++ b/onair/src/reasoning/agent.py @@ -16,34 +16,55 @@ from ..reasoning.complex_reasoning_interface import ComplexReasoningInterface from ..reasoning.diagnosis import Diagnosis + class Agent: - def __init__(self, vehicle, learners_plugin_dict, planners_plugin_dict, complex_plugin_dict): + def __init__( + self, vehicle, learners_plugin_dict, planners_plugin_dict, complex_plugin_dict + ): self.vehicle_rep = vehicle self.mission_status = self.vehicle_rep.get_status() self.bayesian_status = self.vehicle_rep.get_bayesian_status() # AI Interfaces - self.learning_systems = LearnersInterface(self.vehicle_rep.get_headers(),learners_plugin_dict) - self.planning_systems = PlannersInterface(self.vehicle_rep.get_headers(),planners_plugin_dict) - self.complex_reasoning_systems = ComplexReasoningInterface(self.vehicle_rep.get_headers(),complex_plugin_dict) + self.learning_systems = LearnersInterface( + self.vehicle_rep.get_headers(), learners_plugin_dict + ) + self.planning_systems = PlannersInterface( + self.vehicle_rep.get_headers(), planners_plugin_dict + ) + self.complex_reasoning_systems = ComplexReasoningInterface( + self.vehicle_rep.get_headers(), complex_plugin_dict + ) def reason(self, frame): aggregate_high_level_info = {} self.vehicle_rep.update(frame) - aggregate_high_level_info['vehicle_rep'] = self.vehicle_rep.get_state_information() - self.learning_systems.update(self.vehicle_rep.curr_data, aggregate_high_level_info) - aggregate_high_level_info['learning_systems'] = self.learning_systems.render_reasoning() + aggregate_high_level_info["vehicle_rep"] = ( + self.vehicle_rep.get_state_information() + ) + self.learning_systems.update( + self.vehicle_rep.curr_data, aggregate_high_level_info + ) + aggregate_high_level_info["learning_systems"] = ( 
+            self.learning_systems.render_reasoning()
+        )
         self.planning_systems.update(aggregate_high_level_info)
-        aggregate_high_level_info['planning_systems'] = self.planning_systems.render_reasoning()
+        aggregate_high_level_info["planning_systems"] = (
+            self.planning_systems.render_reasoning()
+        )

-        return self.complex_reasoning_systems.update_and_render_reasoning(aggregate_high_level_info)
+        return self.complex_reasoning_systems.update_and_render_reasoning(
+            aggregate_high_level_info
+        )

     def diagnose(self, time_step):
-        """ Grab the mnemonics from the """
+        """Grab the mnemonics from the"""
         learning_system_results = self.learning_systems.render_reasoning()
-        diagnosis = Diagnosis(time_step,
-                              learning_system_results,
-                              self.bayesian_status,
-                              self.vehicle_rep.get_current_faulting_mnemonics())
+        diagnosis = Diagnosis(
+            time_step,
+            learning_system_results,
+            self.bayesian_status,
+            self.vehicle_rep.get_current_faulting_mnemonics(),
+        )
         return diagnosis.perform_diagnosis()
diff --git a/onair/src/reasoning/complex_reasoning_interface.py b/onair/src/reasoning/complex_reasoning_interface.py
index 2074185f..a16625f2 100644
--- a/onair/src/reasoning/complex_reasoning_interface.py
+++ b/onair/src/reasoning/complex_reasoning_interface.py
@@ -14,20 +14,22 @@
 from ..util.data_conversion import *
 from ..util.plugin_import import import_plugins

+
 class ComplexReasoningInterface:
     def __init__(self, headers, _reasoning_plugins={}):
-        assert(len(headers)>0), 'Headers are required'
+        assert len(headers) > 0, "Headers are required"
         self.headers = headers
-        self.reasoning_constructs = import_plugins(self.headers,_reasoning_plugins)
+        self.reasoning_constructs = import_plugins(self.headers, _reasoning_plugins)

     def update_and_render_reasoning(self, high_level_data):
         intelligent_outcomes = high_level_data
-        intelligent_outcomes['complex_systems'] = {}
+        intelligent_outcomes["complex_systems"] = {}
         for plugin in self.reasoning_constructs:
             plugin.update(high_level_data=intelligent_outcomes)
-            intelligent_outcomes['complex_systems'].update({plugin.component_name:plugin.render_reasoning()})
+            intelligent_outcomes["complex_systems"].update(
+                {plugin.component_name: plugin.render_reasoning()}
+            )
         return intelligent_outcomes

     def check_for_salient_event(self):
         pass
-
diff --git a/onair/src/reasoning/diagnosis.py b/onair/src/reasoning/diagnosis.py
index c63fe13c..ae189004 100644
--- a/onair/src/reasoning/diagnosis.py
+++ b/onair/src/reasoning/diagnosis.py
@@ -9,19 +9,22 @@

 import copy
 import numpy as np
-import random
+import random


 class Diagnosis:
-    """ Diagnosis Class used to store and summarize diagnosis results from individaul AIComponent"""
+    """Diagnosis Class used to store and summarize diagnosis results from individual AIComponent"""
+
     NO_DIAGNOSIS = "NO_DIAGNOSIS"

-    def __init__(self,
-                 time_step,
-                 learning_system_results,
-                 status_confidence,
-                 currently_faulting_mnemonics,
-                 ground_truth=None) -> None:
+    def __init__(
+        self,
+        time_step,
+        learning_system_results,
+        status_confidence,
+        currently_faulting_mnemonics,
+        ground_truth=None,
+    ) -> None:

         self.time_step = time_step
         self.status_confidence = status_confidence
@@ -30,10 +33,12 @@ def __init__(self,
         self.ground_truth = ground_truth

         self.has_kalman = "kalman" in learning_system_results
-        self.kalman_results = learning_system_results["kalman"] if self.has_kalman else None
-
+        self.kalman_results = (
+            learning_system_results["kalman"] if self.has_kalman else None
+        )
+
     def perform_diagnosis(self):
-        """ Diagnose the learning system results """
+
"""Diagnose the learning system results""" ret = {} if self.has_kalman: @@ -41,29 +46,26 @@ def perform_diagnosis(self): mnemonic_name = random.choice(list(self.kalman_results[0])) top = self.walkdown(mnemonic_name) - ret = { - "top": top - } + ret = {"top": top} return ret - def walkdown(self, mnemonic_name, used_mnemonics=[]): - """ - Go through the active AIComponents in an ordered way to decide on a diagnosis. - There's a lot of specificity in this function until the method of combining the AIComponents is learned + """ + Go through the active AIComponents in an ordered way to decide on a diagnosis. + There's a lot of specificity in this function until the method of combining the AIComponents is learned """ if len(used_mnemonics) == 0: used_mnemonics = copy.deepcopy(self.currently_faulting_mnemonics) - if mnemonic_name == '': + if mnemonic_name == "": return Diagnosis.NO_DIAGNOSIS if self.has_kalman: # NOTE: This is certainly wrong since the logic is pulled from a statement with many AIComponents if not (mnemonic_name in list(self.kalman_results[0])): return self.kalman_results[0][0] - else: return Diagnosis.NO_DIAGNOSIS + else: + return Diagnosis.NO_DIAGNOSIS else: return Diagnosis.NO_DIAGNOSIS - diff --git a/onair/src/run_scripts/execution_engine.py b/onair/src/run_scripts/execution_engine.py index 5f46cd04..c09e8bd7 100644 --- a/onair/src/run_scripts/execution_engine.py +++ b/onair/src/run_scripts/execution_engine.py @@ -16,14 +16,14 @@ import importlib import ast import shutil -from distutils.dir_util import copy_tree +from shutil import copytree from time import gmtime, strftime from ..run_scripts.sim import Simulator class ExecutionEngine: - def __init__(self, config_file='', run_name='', save_flag=False): + def __init__(self, config_file="", run_name="", save_flag=False): # Init Housekeeping self.run_name = run_name @@ -33,32 +33,33 @@ def __init__(self, config_file='', run_name='', save_flag=False): self.IO_Enabled = False # Init Paths - self.dataFilePath = '' - self.telemetryFile = '' - self.fullTelemetryFile = '' - self.metadataFilePath = '' - self.metaFile = '' - self.fullMetaFile = '' + self.dataFilePath = "" + self.telemetryFile = "" + self.fullTelemetryFile = "" + self.metadataFilePath = "" + self.metaFile = "" + self.fullMetaFile = "" # Init parsing/sim info - self.data_source_file = '' + self.data_source_file = "" self.simDataSource = None self.sim = None # Init plugins - self.knowledge_rep_plugin_dict = [''] - self.learners_plugin_dict = [''] - self.planners_plugin_dict = [''] - self.complex_plugin_dict = [''] + self.knowledge_rep_plugin_dict = [""] + self.learners_plugin_dict = [""] + self.planners_plugin_dict = [""] + self.complex_plugin_dict = [""] self.save_flag = save_flag self.save_name = run_name - if config_file != '': + if config_file != "": self.init_save_paths() self.parse_configs(config_file) - self.parse_data(self.data_source_file, - self.fullTelemetryFile, self.fullMetaFile) + self.parse_data( + self.data_source_file, self.fullTelemetryFile, self.fullMetaFile + ) self.setup_sim() def parse_configs(self, config_filepath): @@ -66,44 +67,44 @@ def parse_configs(self, config_filepath): if len(config.read(config_filepath)) == 0: raise FileNotFoundError( - f"Config file at '{config_filepath}' could not be read.") + f"Config file at '{config_filepath}' could not be read." 
+ ) try: # Parse Required Data: FILES - self.dataFilePath = config['FILES']['TelemetryFilePath'] + self.dataFilePath = config["FILES"]["TelemetryFilePath"] # Vehicle telemetry data - self.telemetryFile = config['FILES']['TelemetryFile'] - self.fullTelemetryFile = os.path.join( - self.dataFilePath, self.telemetryFile) - self.metadataFilePath = config['FILES']['MetaFilePath'] + self.telemetryFile = config["FILES"]["TelemetryFile"] + self.fullTelemetryFile = os.path.join(self.dataFilePath, self.telemetryFile) + self.metadataFilePath = config["FILES"]["MetaFilePath"] # Config for vehicle telemetry - self.metaFile = config['FILES']['MetaFile'] - self.fullMetaFile = os.path.join( - self.metadataFilePath, self.metaFile) + self.metaFile = config["FILES"]["MetaFile"] + self.fullMetaFile = os.path.join(self.metadataFilePath, self.metaFile) # Parse Required Data: DATA_HANDLING - self.data_source_file = config['DATA_HANDLING']['DataSourceFile'] + self.data_source_file = config["DATA_HANDLING"]["DataSourceFile"] # Parse Required Data: PLUGINS self.knowledge_rep_plugin_dict = self.parse_plugins_dict( - config['PLUGINS']['KnowledgeRepPluginDict']) + config["PLUGINS"]["KnowledgeRepPluginDict"] + ) self.learners_plugin_dict = self.parse_plugins_dict( - config['PLUGINS']['LearnersPluginDict']) + config["PLUGINS"]["LearnersPluginDict"] + ) self.planners_plugin_dict = self.parse_plugins_dict( - config['PLUGINS']['PlannersPluginDict']) + config["PLUGINS"]["PlannersPluginDict"] + ) self.complex_plugin_dict = self.parse_plugins_dict( - config['PLUGINS']['ComplexPluginDict']) + config["PLUGINS"]["ComplexPluginDict"] + ) # Parse Optional Data: OPTIONS - # 'OPTIONS' must exist, but individual options return False if missing - if config.has_section('OPTIONS'): - self.IO_Enabled = config['OPTIONS'].getboolean('IO_Enabled') - else: - self.IO_Enabled = False + # OPTIONS section is not required to exist. + if config.has_section("OPTIONS"): + self.IO_Enabled = config["OPTIONS"].getboolean("IO_Enabled") except KeyError as e: - new_message = f"Config file: '{ - config_filepath}', missing key: {e.args[0]}" + new_message = f"Config file: '{config_filepath}', missing key: {e.args[0]}" raise KeyError(new_message) from e def parse_plugins_dict(self, config_plugin_dict): @@ -112,29 +113,41 @@ def parse_plugins_dict(self, config_plugin_dict): if isinstance(ast_plugin_dict.body, ast.Dict): temp_plugin_dict = ast.literal_eval(config_plugin_dict) else: - raise ValueError(f"Plugin dict {config_plugin_dict} from { - self.config_filepath} is invalid. It must be a dict.") + raise ValueError( + f"Plugin dict {config_plugin_dict} from {self.config_filepath} is invalid. It must be a dict." + ) for plugin_file in temp_plugin_dict.values(): if not (os.path.exists(plugin_file)): - raise FileNotFoundError(f"In config file '{self.config_filepath}' Plugin path '{ - plugin_file}' does not exist.") + raise FileNotFoundError( + f"In config file '{self.config_filepath}' Plugin path '{plugin_file}' does not exist." 
+                )

         return temp_plugin_dict

-    def parse_data(self, parser_file_name, data_file_name, metadata_file_name, subsystems_breakdown=False):
+    def parse_data(
+        self,
+        parser_file_name,
+        data_file_name,
+        metadata_file_name,
+        subsystems_breakdown=False,
+    ):
         data_source_spec = importlib.util.spec_from_file_location(
-            'data_source', parser_file_name)
+            "data_source", parser_file_name
+        )
         data_source_module = importlib.util.module_from_spec(data_source_spec)
         data_source_spec.loader.exec_module(data_source_module)
         self.simDataSource = data_source_module.DataSource(
-            data_file_name, metadata_file_name, subsystems_breakdown)
+            data_file_name, metadata_file_name, subsystems_breakdown
+        )

     def setup_sim(self):
-        self.sim = Simulator(self.simDataSource,
-                             self.knowledge_rep_plugin_dict,
-                             self.learners_plugin_dict,
-                             self.planners_plugin_dict,
-                             self.complex_plugin_dict)
+        self.sim = Simulator(
+            self.simDataSource,
+            self.knowledge_rep_plugin_dict,
+            self.learners_plugin_dict,
+            self.planners_plugin_dict,
+            self.complex_plugin_dict,
+        )

     def run_sim(self):
         self.sim.run_sim(self.IO_Enabled)
@@ -142,39 +155,40 @@ def run_sim(self):
         self.save_results(self.save_name)

     def init_save_paths(self):
-        save_path = os.environ['RESULTS_PATH']
-        temp_save_path = os.path.join(save_path, 'tmp')
-        temp_models_path = os.path.join(temp_save_path, 'models')
-        temp_diagnosis_path = os.path.join(temp_save_path, 'diagnosis')
+        save_path = os.environ["RESULTS_PATH"]
+        temp_save_path = os.path.join(save_path, "tmp")
+        temp_models_path = os.path.join(temp_save_path, "models")
+        temp_diagnosis_path = os.path.join(temp_save_path, "diagnosis")

         self.delete_save_paths()
         os.mkdir(temp_save_path)
         os.mkdir(temp_models_path)
         os.mkdir(temp_diagnosis_path)

-        os.environ['ONAIR_SAVE_PATH'] = save_path
-        os.environ['ONAIR_TMP_SAVE_PATH'] = temp_save_path
-        os.environ['ONAIR_MODELS_SAVE_PATH'] = temp_models_path
-        os.environ['ONAIR_DIAGNOSIS_SAVE_PATH'] = temp_diagnosis_path
+        os.environ["ONAIR_SAVE_PATH"] = save_path
+        os.environ["ONAIR_TMP_SAVE_PATH"] = temp_save_path
+        os.environ["ONAIR_MODELS_SAVE_PATH"] = temp_models_path
+        os.environ["ONAIR_DIAGNOSIS_SAVE_PATH"] = temp_diagnosis_path

     def delete_save_paths(self):
-        save_path = os.environ['RESULTS_PATH']
+        save_path = os.environ["RESULTS_PATH"]
         sub_dirs = os.listdir(save_path)
-        if 'tmp' in sub_dirs:
+        if "tmp" in sub_dirs:
             try:
-                shutil.rmtree(save_path + '/tmp')
+                shutil.rmtree(save_path + "/tmp")
             except OSError as e:
                 print("Error: %s : %s" % (save_path, e.strerror))

     def save_results(self, save_name):
         complete_time = strftime("%H-%M-%S", gmtime())
-        save_path = os.environ['ONAIR_SAVE_PATH'] + \
-            'saved/' + save_name + '_' + complete_time
+        save_path = (
+            os.environ["ONAIR_SAVE_PATH"] + "saved/" + save_name + "_" + complete_time
+        )
         os.makedirs(save_path, exist_ok=True)
-        copy_tree(os.environ['ONAIR_TMP_SAVE_PATH'], save_path)
+        copytree(os.environ["ONAIR_TMP_SAVE_PATH"], save_path, dirs_exist_ok=True)

     def set_run_param(self, name, val):
         setattr(self, name, val)

     def ast_parse_eval(self, config_list):
-        return ast.parse(config_list, mode='eval')
+        return ast.parse(config_list, mode="eval")
diff --git a/onair/src/run_scripts/sim.py b/onair/src/run_scripts/sim.py
index c2a149fa..40f32a27 100644
--- a/onair/src/run_scripts/sim.py
+++ b/onair/src/run_scripts/sim.py
@@ -22,15 +22,26 @@

 DIAGNOSIS_INTERVAL = 100

+
 class Simulator:
-    def __init__(self, dataSource, knowledge_rep_plugin_dict, learners_plugin_dict, planners_plugin_dict, complex_plugin_dict):
+    def __init__(
+        self,
+        dataSource,
+
knowledge_rep_plugin_dict, + learners_plugin_dict, + planners_plugin_dict, + complex_plugin_dict, + ): self.simData = dataSource headers, tests = dataSource.get_vehicle_metadata() vehicle = VehicleRepresentation(headers, tests, knowledge_rep_plugin_dict) - self.agent = Agent(vehicle, learners_plugin_dict, planners_plugin_dict, complex_plugin_dict) + self.agent = Agent( + vehicle, learners_plugin_dict, planners_plugin_dict, complex_plugin_dict + ) def run_sim(self, IO_Flag=False): - if IO_Flag == True: print_sim_header() + if IO_Flag == True: + print_sim_header() diagnosis_list = [] time_step = 0 last_diagnosis = time_step @@ -42,8 +53,8 @@ def run_sim(self, IO_Flag=False): self.IO_check(time_step, IO_Flag) ### Stop when a fault is reached - if self.agent.mission_status == 'RED': - if last_fault == time_step - 1: #if they are consecutive + if self.agent.mission_status == "RED": + if last_fault == time_step - 1: # if they are consecutive if (time_step - last_diagnosis) % DIAGNOSIS_INTERVAL == 0: diagnosis_list.append(self.agent.diagnose(time_step)) last_diagnosis = time_step diff --git a/onair/src/systems/status.py b/onair/src/systems/status.py index 5fc1c018..f22065d5 100644 --- a/onair/src/systems/status.py +++ b/onair/src/systems/status.py @@ -11,15 +11,16 @@ Status Class """ + class Status: - def __init__(self, name='MISSION', stat='---', conf=-1.0): - self.name = name + def __init__(self, name="MISSION", stat="---", conf=-1.0): + self.name = name self.set_status(stat, conf) ##### GETTERS & SETTERS ################################## def set_status(self, stat, bayesianConf=-1.0): - assert(-1.0 <= bayesianConf <= 1.0) - assert(stat in ['---', 'RED', 'YELLOW', 'GREEN']) + assert -1.0 <= bayesianConf <= 1.0 + assert stat in ["---", "RED", "YELLOW", "GREEN"] self.status = stat self.bayesian_conf = bayesianConf diff --git a/onair/src/systems/telemetry_test_suite.py b/onair/src/systems/telemetry_test_suite.py index e31f4c33..cc89e9d7 100644 --- a/onair/src/systems/telemetry_test_suite.py +++ b/onair/src/systems/telemetry_test_suite.py @@ -6,7 +6,7 @@ # # Licensed under the NASA Open Source Agreement version 1.3 # See "NOSA GSC-19165-1 OnAIR.pdf" - + """ TelemetryTestSuite Class Handles telemetry mnemonic testing @@ -16,18 +16,21 @@ from .status import Status from collections import Counter + class TelemetryTestSuite: def __init__(self, headers=[], tests=[]): self.dataFields = headers self.tests = tests self.latest_results = None - self.epsilon = 0.00001 # should define this intelligently - self.all_tests = {'STATE' : self.state, - 'FEASIBILITY' : self.feasibility, - 'NOOP' : self.noop} + self.epsilon = 0.00001 # should define this intelligently + self.all_tests = { + "STATE": self.state, + "FEASIBILITY": self.feasibility, + "NOOP": self.noop, + } ################################################ - ################ Running Tests ############### + ################ Running Tests ############### def execute_suite(self, updated_frame, sync_data={}): results = [] @@ -43,14 +46,15 @@ def run_tests(self, header_index, test_val, sync_data): test_name = test[0] test_data = test[1:] - stat, mass_assignments = self.all_tests[test_name](test_val, test_data, self.epsilon) - status.append(stat) # tuple + stat, mass_assignments = self.all_tests[test_name]( + test_val, test_data, self.epsilon + ) + status.append(stat) # tuple bayesian = self.calc_single_status(status) # TODO: Add all mass assignments? 
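+        # calc_single_status returns a (status, confidence) pair; bayesian[0]
+        # is the combined status string and bayesian[1] its confidence value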
return Status(self.dataFields[header_index], bayesian[0], bayesian[1]) - def get_latest_result(self, fieldName): if self.latest_results == None: return None @@ -58,7 +62,7 @@ def get_latest_result(self, fieldName): return self.latest_results[hdr_index] ################################################ - ################ Test Suites ################ + ################ Test Suites ################ def state(self, val, test_params, epsilon): mass_assignments = [] @@ -68,29 +72,29 @@ def state(self, val, test_params, epsilon): red = test_params[2] if int(val) in green: - stat = 'GREEN' + stat = "GREEN" mass_assignments.append(({stat}, 1.0)) return stat, mass_assignments if int(val) in yellow: - stat = 'YELLOW' + stat = "YELLOW" mass_assignments.append(({stat}, 1.0)) return stat, mass_assignments if int(val) in red: - stat = 'RED' + stat = "RED" mass_assignments.append(({stat}, 1.0)) return stat, mass_assignments - - stat = '---' - mass_assignments.append(({'RED', 'YELLOW', 'GREEN'}, 1.0)) + + stat = "---" + mass_assignments.append(({"RED", "YELLOW", "GREEN"}, 1.0)) return stat, mass_assignments def feasibility(self, val, test_params, epsilon): - ''' + """ Test_Params : threshold ranges an attribute should fall in # if len(test_params == 4) then the thresholds are as follows: # before [0] is red # between [0] - [1] is yellow - # between [1] - [2] is green + # between [1] - [2] is green # between [2] - [3] is yellow # after [3] is red @@ -98,29 +102,32 @@ def feasibility(self, val, test_params, epsilon): # before [0] is red # between [0] - [1] is green # after [1] is red - ''' - assert( (len(test_params) == 2) or (len(test_params) == 4)) + """ + assert (len(test_params) == 2) or (len(test_params) == 4) - stat = '---' + stat = "---" mass_assignments = [] val = float(val) lowest_bound = test_params[0] - highest_bound = test_params[len(test_params)-1] - deltas = [abs(test_params[i+1] - test_params[i]) for i in range(0, len(test_params) - 1)] + highest_bound = test_params[len(test_params) - 1] + deltas = [ + abs(test_params[i + 1] - test_params[i]) + for i in range(0, len(test_params) - 1) + ] delta = epsilon * min(deltas) - statuses = ['RED', 'YELLOW', 'GREEN', 'YELLOW', 'RED'] + statuses = ["RED", "YELLOW", "GREEN", "YELLOW", "RED"] if len(test_params) == 2: - statuses = ['RED', 'GREEN', 'RED'] + statuses = ["RED", "GREEN", "RED"] - #Lower boundary values stat: red stat_right: green / yellow - if val <= lowest_bound: + # Lower boundary values stat: red stat_right: green / yellow + if val <= lowest_bound: stat = statuses[0] right_stat = statuses[1] - l_range = lowest_bound - delta + l_range = lowest_bound - delta if val == lowest_bound: mass_assignments.append(({stat, right_stat}, 1.0)) @@ -128,17 +135,17 @@ def feasibility(self, val, test_params, epsilon): if val < l_range: mass_assignments.append(({stat}, 1.0)) else: - mass = abs(lowest_bound - val)/delta + mass = abs(lowest_bound - val) / delta mass_assignments.append(({stat}, mass)) red_yellow_mass = 1.0 - mass mass_assignments.append(({stat, right_stat}, red_yellow_mass)) - # Upper boundary values stat : red stat_left: green/yellow + # Upper boundary values stat : red stat_left: green/yellow elif val >= highest_bound: - stat = statuses[len(statuses)-1] - left_stat = statuses[len(statuses)-2] + stat = statuses[len(statuses) - 1] + left_stat = statuses[len(statuses) - 2] - u_range = highest_bound + delta + u_range = highest_bound + delta if val == highest_bound: mass_assignments.append(({left_stat, stat}, 1.0)) @@ -146,35 +153,35 @@ def 
feasibility(self, val, test_params, epsilon): if val > u_range: mass_assignments.append(({stat}, 1.0)) else: - mass = abs(highest_bound - val)/delta + mass = abs(highest_bound - val) / delta mass_assignments.append(({stat}, mass)) red_yellow_mass = 1.0 - mass mass_assignments.append(({left_stat, stat}, red_yellow_mass)) - #Between boundaries + # Between boundaries else: - for i in range(0, len(test_params) - 1): #This may need to change... + for i in range(0, len(test_params) - 1): # This may need to change... l_bound = test_params[i] - u_bound = test_params[i+1] + u_bound = test_params[i + 1] - left_stat = statuses[i] - temp_mid_stat = statuses[i+1] - right_stat = statuses[i+2] + left_stat = statuses[i] + temp_mid_stat = statuses[i + 1] + right_stat = statuses[i + 2] - lb_buffer = l_bound + delta - ub_buffer = u_bound - delta + lb_buffer = l_bound + delta + ub_buffer = u_bound - delta if l_bound < val < u_bound: - # Lower bound + # Lower bound if val < lb_buffer: stat = temp_mid_stat - mass = abs(l_bound - val)/delta + mass = abs(l_bound - val) / delta mass_assignments.append(({stat}, mass)) mass_assignments.append(({left_stat, stat}, 1.0 - mass)) - # Upper bound + # Upper bound elif val > ub_buffer: stat = temp_mid_stat - mass = abs(u_bound - val)/delta + mass = abs(u_bound - val) / delta mass_assignments.append(({stat}, mass)) mass_assignments.append(({stat, right_stat}, 1.0 - mass)) else: @@ -184,45 +191,44 @@ def feasibility(self, val, test_params, epsilon): if val == l_bound: stat = temp_mid_stat mass_assignments.append(({left_stat, stat}, 1.0)) - #elif val == u_bound: + # elif val == u_bound: # stat = temp_mid_stat # mass_assignments.append(({stat, right_stat}, 1.0)) return stat, mass_assignments def noop(self, val, test_params, epsilon): - stat = 'GREEN' + stat = "GREEN" mass_assignments = [({stat}, 1.0)] return stat, mass_assignments - ################################################ - ############## Combining statuses ############## - def calc_single_status(self, status_list, mode='strict'): + ############## Combining statuses ############## + def calc_single_status(self, status_list, mode="strict"): occurrences = Counter(status_list) max_occurrence = occurrences.most_common(1)[0][0] - if mode == 'strict': - if occurrences['RED'] > 0: - conf = occurrences['RED']/len(status_list) - return 'RED', conf + if mode == "strict": + if occurrences["RED"] > 0: + conf = occurrences["RED"] / len(status_list) + return "RED", conf else: - return max_occurrence, 1.0 # return max + return max_occurrence, 1.0 # return max - if mode == 'distr': - conf = occurrences[max_occurrence]/len(status_list) + if mode == "distr": + conf = occurrences[max_occurrence] / len(status_list) return max_occurrence, conf - elif mode == 'max': + elif mode == "max": return max_occurrence, 1.0 - else: - return max_occurrence, 1.0 # return max + else: + return max_occurrence, 1.0 # return max def get_suite_status(self): status_strings = [res.get_status() for res in self.latest_results] - return self.calc_single_status(status_strings) + return self.calc_single_status(status_strings) - def get_status_specific_mnemonics(self, status='RED'): - names = [res.get_name() for res in self.latest_results if res.get_status() == status] + def get_status_specific_mnemonics(self, status="RED"): + names = [ + res.get_name() for res in self.latest_results if res.get_status() == status + ] return names - - diff --git a/onair/src/systems/vehicle_rep.py b/onair/src/systems/vehicle_rep.py index c4b6f68d..b4c46906 100644 --- 
a/onair/src/systems/vehicle_rep.py +++ b/onair/src/systems/vehicle_rep.py @@ -17,18 +17,22 @@ from ..util.print_io import * from ..util.plugin_import import import_plugins + # from ..util.data_conversion import * + class VehicleRepresentation: def __init__(self, headers, tests, _knowledge_rep_plugins={}): - assert(len(headers) == len(tests)) + assert len(headers) == len(tests) self.headers = headers - self.knowledge_synthesis_constructs = import_plugins(self.headers,_knowledge_rep_plugins) + self.knowledge_synthesis_constructs = import_plugins( + self.headers, _knowledge_rep_plugins + ) - self.status = Status('MISSION') + self.status = Status("MISSION") self.test_suite = TelemetryTestSuite(headers, tests) - - self.curr_data = ['-']* len(self.headers) #stale data + + self.curr_data = ["-"] * len(self.headers) # stale data ##### UPDATERS ################################# def update(self, frame): @@ -44,7 +48,7 @@ def update_constructs(self, frame): def update_curr_data(self, frame): for i in range(len(frame)): - if frame[i] != '-': + if frame[i] != "-": self.curr_data[i] = frame[i] ##### GETTERS AND SETTERS ##### @@ -69,10 +73,8 @@ def get_bayesian_status(self): def get_batch_status_reports(self, batch_data): return - def get_state_information(self, scope=['status']): + def get_state_information(self, scope=["status"]): state_info = {} for construct in self.knowledge_synthesis_constructs: state_info[construct.component_name] = construct.render_reasoning() return state_info - - diff --git a/onair/src/util/cleanup.py b/onair/src/util/cleanup.py index 9cd1c2da..428ca4cd 100644 --- a/onair/src/util/cleanup.py +++ b/onair/src/util/cleanup.py @@ -14,6 +14,7 @@ import os + def setup_folders(results_path): if not os.path.isdir(results_path): os.mkdir(results_path) diff --git a/onair/src/util/data_conversion.py b/onair/src/util/data_conversion.py index d02110c3..c888d4e0 100644 --- a/onair/src/util/data_conversion.py +++ b/onair/src/util/data_conversion.py @@ -11,12 +11,10 @@ data_conversion.py Utility file to perform conversions for supervised learning, and beyond """ -import numpy as np +import numpy as np + +classes = {"RED": 0, "YELLOW": 1, "GREEN": 2, "---": 3} -classes = {'RED' : 0, - 'YELLOW' : 1, - 'GREEN' : 2, - '---' : 3} def status_to_oneHot(status): if isinstance(status, np.ndarray): diff --git a/onair/src/util/file_io.py b/onair/src/util/file_io.py index e14b44c4..ff3e21df 100644 --- a/onair/src/util/file_io.py +++ b/onair/src/util/file_io.py @@ -14,28 +14,30 @@ import json + def parse_associations_from_json(filepath): with open(filepath) as f: - data = json.load(f) + data = json.load(f) associations_list = [] - raw_associations = data['children'] + raw_associations = data["children"] for association in raw_associations: - antecedant = association['name'] - for connection in association['connections']: - consequent = connection['target'] - weight = connection['weight'] + antecedant = association["name"] + for connection in association["connections"]: + consequent = connection["target"] + weight = connection["weight"] relationship = (antecedant, consequent) weighted_relationship = (relationship, weight) associations_list.append(weighted_relationship) - - associations_list.sort(key = lambda x: x[1], reverse=True) - + + associations_list.sort(key=lambda x: x[1], reverse=True) + for elem in associations_list: ant = elem[0][0] cons = elem[0][1] wei = elem[1] - print(str(ant) + ' --> ' + str(cons) + ', ' + str(wei)) + print(str(ant) + " --> " + str(cons) + ", " + str(wei)) + def 
aggregate_results(): - return \ No newline at end of file + return diff --git a/onair/src/util/plugin_import.py b/onair/src/util/plugin_import.py index 04352677..beb0d539 100644 --- a/onair/src/util/plugin_import.py +++ b/onair/src/util/plugin_import.py @@ -16,6 +16,7 @@ import sys import os + def import_plugins(headers, module_dict): plugin_list = [] init_filename = "__init__.py" @@ -23,7 +24,7 @@ def import_plugins(headers, module_dict): true_path = module_path # Compatibility for plugin paths that already include __init__.py if module_path.endswith(init_filename): - true_path = module_path[:-len(init_filename) - 1] + true_path = module_path[: -len(init_filename) - 1] # Last directory name is the module name mod_name = os.path.basename(true_path) # import module if not already available @@ -39,9 +40,8 @@ def import_plugins(headers, module_dict): # add plugin module to system for importation sys.modules[mod_name] = module # import the created module's plugin file for use - plugin_name = f'{mod_name}_plugin' - plugin = __import__(f'{mod_name}.{plugin_name}', - fromlist=[plugin_name]) + plugin_name = f"{mod_name}_plugin" + plugin = __import__(f"{mod_name}.{plugin_name}", fromlist=[plugin_name]) # add an instance of the module's was an OnAIR plugin plugin_list.append(plugin.Plugin(construct_name, headers)) - return(plugin_list) + return plugin_list diff --git a/onair/src/util/print_io.py b/onair/src/util/print_io.py index 6abef33b..4593d553 100644 --- a/onair/src/util/print_io.py +++ b/onair/src/util/print_io.py @@ -12,67 +12,96 @@ Helper script used by sim.py to print out simulation data with pretty colors """ + ############################# COLORS ############################# # Static class to hold color constants class bcolors: - HEADER = '\033[95m' - OKBLUE = '\033[94m' - OKGREEN = '\033[92m' - WARNING = '\033[93m' - FAIL = '\033[91m' - ENDC = '\033[0m' - BOLD = '\033[1m' - UNDERLINE = '\033[4m' + HEADER = "\033[95m" + OKBLUE = "\033[94m" + OKGREEN = "\033[92m" + WARNING = "\033[93m" + FAIL = "\033[91m" + ENDC = "\033[0m" + BOLD = "\033[1m" + UNDERLINE = "\033[4m" + # Global colors dictionary -scolors = {'HEADER' : bcolors.HEADER, - 'OKBLUE' : bcolors.OKBLUE, - 'OKGREEN' : bcolors.OKGREEN, - 'WARNING' : bcolors.WARNING, - 'FAIL' : bcolors.FAIL, - 'ENDC' : bcolors.ENDC, - 'BOLD' : bcolors.BOLD, - 'UNDERLINE' : bcolors.UNDERLINE} +scolors = { + "HEADER": bcolors.HEADER, + "OKBLUE": bcolors.OKBLUE, + "OKGREEN": bcolors.OKGREEN, + "WARNING": bcolors.WARNING, + "FAIL": bcolors.FAIL, + "ENDC": bcolors.ENDC, + "BOLD": bcolors.BOLD, + "UNDERLINE": bcolors.UNDERLINE, +} # Global dictionary for STATUS -> COLOR -status_colors = {'GREEN' : bcolors.OKGREEN, - 'YELLOW' : bcolors.WARNING, - 'RED' : bcolors.FAIL, - '---' : bcolors.OKBLUE} +status_colors = { + "GREEN": bcolors.OKGREEN, + "YELLOW": bcolors.WARNING, + "RED": bcolors.FAIL, + "---": bcolors.OKBLUE, +} + ############################# I/O ############################# # Print that the simulation started def print_sim_header(): - print(bcolors.HEADER + bcolors.BOLD+ "\n***************************************************") + print( + bcolors.HEADER + + bcolors.BOLD + + "\n***************************************************" + ) print("************ SIMULATION STARTED ************") print("***************************************************" + bcolors.ENDC) + # Print when a new step is starting def print_sim_step(step_num): - print(bcolors.HEADER + bcolors.BOLD + "\n--------------------- STEP " + str(step_num) + " ---------------------\n" + 
bcolors.ENDC) + print( + bcolors.HEADER + + bcolors.BOLD + + "\n--------------------- STEP " + + str(step_num) + + " ---------------------\n" + + bcolors.ENDC + ) + # Print a line to separate things def print_separator(color=bcolors.HEADER): - print(color + bcolors.BOLD + "\n------------------------------------------------\n" + bcolors.ENDC) + print( + color + + bcolors.BOLD + + "\n------------------------------------------------\n" + + bcolors.ENDC + ) + # Print header update def update_header(msg, clr=bcolors.BOLD): - print(clr + "--------- " + msg + ' update' + bcolors.ENDC) + print(clr + "--------- " + msg + " update" + bcolors.ENDC) + # Print header update -def print_msg(msg, clrs=['HEADER']): +def print_msg(msg, clrs=["HEADER"]): for clr in clrs: print(scolors[clr]) print("---- " + msg + bcolors.ENDC) + # Print interpreted system status -def print_system_status(agent, data = None): +def print_system_status(agent, data=None): # print_separator(bcolors.OKBLUE) if data != None: - print("CURRENT DATA: " + str(data)) + print("CURRENT DATA: " + str(data)) print("INTERPRETED SYSTEM STATUS: " + str(format_status(agent.mission_status))) # print_separator(bcolors.OKBLUE) + # Print diagnosis info def print_diagnosis(diagnosis): status_list = diagnosis.get_status_list() @@ -82,50 +111,63 @@ def print_diagnosis(diagnosis): print(bcolors.HEADER + bcolors.BOLD + "DIAGNOSIS INFO: \n" + bcolors.ENDC) for status in status_list: stat = status[1] - print(status[0] + ': ' + format_status(stat)) + print(status[0] + ": " + format_status(stat)) print(bcolors.HEADER + bcolors.BOLD + "\nCURRENT ACTIVATIONS: \n" + bcolors.ENDC) if len(activations) > 0: for activation in activations: - print('---' + str(activation)) + print("---" + str(activation)) print_separator() + # Print subsystem status def subsystem_status_str(ss): - s = bcolors.BOLD + '[' + str(ss.type)+ '] : ' + bcolors.ENDC + s = bcolors.BOLD + "[" + str(ss.type) + "] : " + bcolors.ENDC stat = ss.get_status() - s = s + '\n' + status_colors[stat] + ' ---- ' + str(stat) + bcolors.ENDC + ' (' + str(ss.uncertainty) + ')' - return s + '\n' + s = ( + s + + "\n" + + status_colors[stat] + + " ---- " + + str(stat) + + bcolors.ENDC + + " (" + + str(ss.uncertainty) + + ")" + ) + return s + "\n" + # Print out subsystem information def subsystem_str(ss): - s = bcolors.BOLD + ss.type + '\n' + bcolors.ENDC - s = s + '--[headers] ' + s = bcolors.BOLD + ss.type + "\n" + bcolors.ENDC + s = s + "--[headers] " for h in ss.headers: - s = s + '\n---' + str(h) - s = s + '\n--[tests] ' + s = s + "\n---" + str(h) + s = s + "\n--[tests] " for t in ss.tests: - s = s + '\n---' + str(t) - s = s + '\n--[test data] ' + s = s + "\n---" + str(t) + s = s + "\n--[test data] " for d in ss.test_data: - s = s + '\n---' + str(d) + s = s + "\n---" + str(d) return s + # Print out headers def headers_string(headers): - s = '' + s = "" for hdr in headers: - s = s + '\n -- ' + hdr + s = s + "\n -- " + hdr return s + # Print out status def format_status(stat): if type(stat) == str: - return status_colors[stat] + stat + scolors['ENDC'] - else: - s = '(' + return status_colors[stat] + stat + scolors["ENDC"] + else: + s = "(" for status in stat: - s = s + format_status(status) + ', ' - s = s[:-2] + ')' - return s - + s = s + format_status(status) + ", " + s = s[:-2] + ")" + return s diff --git a/onair/src/util/sim_io.py b/onair/src/util/sim_io.py index d5b538cd..6026f8fe 100644 --- a/onair/src/util/sim_io.py +++ b/onair/src/util/sim_io.py @@ -13,51 +13,68 @@ """ import os -import json +import json 
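+# Note: render_reasoning below appends a human-readable report to diagnosis.txt
+# and a CSV summary to diagnosis.csv, both written under the directory named by
+# the ONAIR_DIAGNOSIS_SAVE_PATH environment variable.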
+ def render_reasoning(diagnosis_list): - with open(os.path.join(os.environ.get('ONAIR_DIAGNOSIS_SAVE_PATH'), 'diagnosis.txt'), mode='a') as out: - out.write('==========================================================\n') - out.write(' DIAGNOSIS \n') - out.write('==========================================================\n') + with open( + os.path.join(os.environ.get("ONAIR_DIAGNOSIS_SAVE_PATH"), "diagnosis.txt"), + mode="a", + ) as out: + out.write("==========================================================\n") + out.write(" DIAGNOSIS \n") + out.write("==========================================================\n") for diagnosis in diagnosis_list: - out.write('\n----------------------------------------------------------\n') - out.write('*** DIAGNOSIS AT FRAME {} ***\n'.format(diagnosis.get_time_step())) + out.write("\n----------------------------------------------------------\n") + out.write( + "*** DIAGNOSIS AT FRAME {} ***\n".format( + diagnosis.get_time_step() + ) + ) out.write(diagnosis.__str__()) - out.write('----------------------------------------------------------\n') - with open(os.path.join(os.environ.get('ONAIR_DIAGNOSIS_SAVE_PATH'), 'diagnosis.csv'), mode='a') as out: - out.write('time_step, cohens_kappa, faults, subgraph\n') + out.write("----------------------------------------------------------\n") + with open( + os.path.join(os.environ.get("ONAIR_DIAGNOSIS_SAVE_PATH"), "diagnosis.csv"), + mode="a", + ) as out: + out.write("time_step, cohens_kappa, faults, subgraph\n") for diagnosis in diagnosis_list: out.write(diagnosis.results_csv()) + def render_viz(status_data, sensor_data, sim_name, diagnosis=None): # Status Staburst - status_report = {} - status_report['filename'] = sim_name - status_report['data'] = status_data - with open(os.path.join(os.environ.get('ONAIR_VIZ_SAVE_PATH'), 'system.json'), 'w') as outfile: + status_report = {} + status_report["filename"] = sim_name + status_report["data"] = status_data + with open( + os.path.join(os.environ.get("ONAIR_VIZ_SAVE_PATH"), "system.json"), "w" + ) as outfile: json.dump(status_report, outfile) # Associativity sensor_status_report = {} - sensor_status_report['name'] = 'MISSION' - sensor_status_report['children'] = sensor_data + sensor_status_report["name"] = "MISSION" + sensor_status_report["children"] = sensor_data - with open(os.path.join(os.environ.get('ONAIR_VIZ_SAVE_PATH'), 'faults.json'), 'w') as outfile: + with open( + os.path.join(os.environ.get("ONAIR_VIZ_SAVE_PATH"), "faults.json"), "w" + ) as outfile: json.dump(sensor_status_report, outfile) # Diagnosis info if diagnosis is not None: results = diagnosis.get_diagnosis_viz_json() - with open(os.path.join(os.environ.get('ONAIR_VIZ_SAVE_PATH'), 'results.json'), 'w') as outfile: + with open( + os.path.join(os.environ.get("ONAIR_VIZ_SAVE_PATH"), "results.json"), "w" + ) as outfile: json.dump(results, outfile) + def print_dots(ts): incrFlag = ts % 20 if incrFlag < 10: dots = ts % 10 else: dots = 10 - (ts % 10) - print('\033[95m' + (dots+1)*'.' + '\033[0m') - - + print("\033[95m" + (dots + 1) * "." 
+ "\033[0m") diff --git a/plugins/generic/generic_plugin.py b/plugins/generic/generic_plugin.py index 9fd04f8f..b6194a11 100644 --- a/plugins/generic/generic_plugin.py +++ b/plugins/generic/generic_plugin.py @@ -10,8 +10,9 @@ import numpy as np from onair.src.ai_components.ai_plugin_abstract.ai_plugin import AIPlugin + class Plugin(AIPlugin): - def update(self,low_level_data=[], high_level_data={}): + def update(self, low_level_data=[], high_level_data={}): """ Given streamed data point, system should update internally """ diff --git a/plugins/kalman/kalman_plugin.py b/plugins/kalman/kalman_plugin.py index 6fbe09d8..c421fd3f 100644 --- a/plugins/kalman/kalman_plugin.py +++ b/plugins/kalman/kalman_plugin.py @@ -11,6 +11,7 @@ import numpy as np from onair.src.ai_components.ai_plugin_abstract.ai_plugin import AIPlugin + class Plugin(AIPlugin): def __init__(self, name, headers, window_size=3): """ @@ -24,10 +25,11 @@ def __init__(self, name, headers, window_size=3): self.window_size = window_size self.kf = simdkalman.KalmanFilter( - state_transition = [[1,1],[0,1]], # matrix A - process_noise = np.diag([0.1, 0.01]), # Q - observation_model = np.array([[1,0]]), # H - observation_noise = 1.0) # R + state_transition=[[1, 1], [0, 1]], # matrix A + process_noise=np.diag([0.1, 0.01]), # Q + observation_model=np.array([[1, 0]]), # H + observation_noise=1.0, + ) # R #### START: Classes mandated by plugin architecture def update(self, frame): @@ -36,13 +38,17 @@ def update(self, frame): :return: None """ for data_point_index in range(len(frame)): - if len(self.frames) < len(frame): # If the frames variable is empty, append each data point in frame to it, each point wrapped as a list + if len(self.frames) < len( + frame + ): # If the frames variable is empty, append each data point in frame to it, each point wrapped as a list # This is done so the data can have each attribute grouped in one list before being passed to kalman # Ex: [[1:00, 1:01, 1:02, 1:03, 1:04, 1:05], [1, 2, 3, 4, 5]] self.frames.append([frame[data_point_index]]) else: self.frames[data_point_index].append(frame[data_point_index]) - if len(self.frames[data_point_index]) > self.window_size: # If after adding a point to the frame, that attribute is larger than the window_size, take out the first element + if ( + len(self.frames[data_point_index]) > self.window_size + ): # If after adding a point to the frame, that attribute is larger than the window_size, take out the first element self.frames[data_point_index].pop(0) def render_reasoning(self): @@ -51,41 +57,48 @@ def render_reasoning(self): """ broken_attributes = self.frame_diagnosis(self.frames, self.headers) return broken_attributes + #### END: Classes mandated by plugin architecture # Gets mean of values def mean(self, values): - return sum(values)/len(values) + return sum(values) / len(values) # Gets absolute value residual from actual and predicted value def residual(self, predicted, actual): return abs(float(actual) - float(predicted)) - #Gets standard deviation of data + # Gets standard deviation of data def std_dev(self, data): return np.std(data) # Takes in the kf being used, the data, how many prediction "steps" it will make, and an optional initial value # Gives a prediction values based on given parameters - def predict(self, data, forward_steps, inital_val = None): + def predict(self, data, forward_steps, inital_val=None): for i in range(len(data)): - data[i] = float(data[i]) # Makes sure all the data being worked with is a float - if(inital_val != None): - smoothed = 
self.kf.smooth(data, initial_value = [float(inital_val),0]) # If there's an initial value, smooth it along that value
+            data[i] = float(
+                data[i]
+            )  # Makes sure all the data being worked with is a float
+        if inital_val != None:
+            smoothed = self.kf.smooth(
+                data, initial_value=[float(inital_val), 0]
+            )  # If there's an initial value, smooth it along that value
         else:
-            smoothed = self.kf.smooth(data) # If not, smooth it however you like
-        predicted = self.kf.predict(data, forward_steps) # Make a prediction on the smoothed data
+            smoothed = self.kf.smooth(data)  # If not, smooth it however you like
+        predicted = self.kf.predict(
+            data, forward_steps
+        )  # Make a prediction on the smoothed data
         return predicted
     def predictions_for_given_data(self, data):
         returned_data = []
         initial_val = data[0]
-        for item in range(len(data)-1):
-            predicted = self.predict(data[0:item+1], 1, initial_val)
-            actual_next_state = data[item+1]
+        for item in range(len(data) - 1):
+            predicted = self.predict(data[0 : item + 1], 1, initial_val)
+            actual_next_state = data[item + 1]
             pred_mean = predicted.observations.mean
             returned_data.append(pred_mean)
-        if(len(returned_data) == 0): # If there's not enough data just set it to 0
+        if len(returned_data) == 0:  # If there's not enough data just set it to 0
             returned_data.append(0)
         return returned_data
@@ -93,29 +106,31 @@ def predictions_for_given_data(self, data):
     def generate_residuals_for_given_data(self, data):
         residuals = []
         initial_val = data[0]
-        for item in range(len(data)-1):
-            predicted = self.predict(data[0:item+1], 1, initial_val)
-            actual_next_state = data[item+1]
+        for item in range(len(data) - 1):
+            predicted = self.predict(data[0 : item + 1], 1, initial_val)
+            actual_next_state = data[item + 1]
             pred_mean = predicted.observations.mean
             residual_error = float(self.residual(pred_mean, actual_next_state))
             residuals.append(residual_error)
-        if(len(residuals) == 0): # If there are no residuals because the data is of length 1, then just say residuals is equal to [0]
+        if (
+            len(residuals) == 0
+        ):  # If there are no residuals because the data is of length 1, then just say residuals is equal to [0]
            residuals.append(0)
         return residuals
-    #Info: takes a chunk of data of n size. Walks through it and gets residual errors.
-    #Takes the mean of the errors and determines if they're too large overall in order to determine whether or not there's a chunk in said error.
+    # Info: takes a chunk of data of n size. Walks through it and gets residual errors.
+    # Takes the mean of the errors and determines if they're too large overall in order to determine whether or not there's an error in said chunk.
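+    # Illustrative aside, not the plugin's actual predictor: with a naive
+    # previous-value "prediction" standing in for the Kalman smoother, the
+    # heuristic below reduces to thresholding the mean absolute one-step
+    # residual at the hard-coded 1.5 cutoff:
+    #
+    #     residuals = [abs(b - a) for a, b in zip(data, data[1:])]
+    #     flagged = sum(residuals) / len(residuals) >= 1.5
+    #
+    # e.g. [1, 2, 30] is flagged (mean residual 14.5) while [1, 1.1, 1.2] is not.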
def current_attribute_chunk_get_error(self, data): residuals = self.generate_residuals_for_given_data(data) mean_residuals = abs(self.mean(residuals)) - if (abs(mean_residuals) < 1.5): - return False + if abs(mean_residuals) < 1.5: + return False return True def frame_diagnosis(self, frame, headers): kal_broken_attributes = [] for attribute_index in range(len(frame)): error = self.current_attribute_chunk_get_error(frame[attribute_index]) - if error and not headers[attribute_index].upper() == 'TIME': + if error and not headers[attribute_index].upper() == "TIME": kal_broken_attributes.append(headers[attribute_index]) return kal_broken_attributes diff --git a/plugins/reporter/reporter_plugin.py b/plugins/reporter/reporter_plugin.py index 986ba942..9a2f986d 100644 --- a/plugins/reporter/reporter_plugin.py +++ b/plugins/reporter/reporter_plugin.py @@ -9,6 +9,7 @@ from onair.src.ai_components.ai_plugin_abstract.ai_plugin import AIPlugin + class Plugin(AIPlugin): verbose_mode = False @@ -23,7 +24,9 @@ def update(self, low_level_data=[], high_level_data={}): if self.verbose_mode: print(f" : headers {self.headers}") print(f" : low_level_data {low_level_data.__class__} = '{low_level_data}'") - print(f" : high_level_data {high_level_data.__class__} = '{high_level_data}'") + print( + f" : high_level_data {high_level_data.__class__} = '{high_level_data}'" + ) def render_reasoning(self): """ diff --git a/redis-experiment-publisher.py b/redis-experiment-publisher.py index b5b6b53e..e603dc76 100644 --- a/redis-experiment-publisher.py +++ b/redis-experiment-publisher.py @@ -8,12 +8,13 @@ # When your Redis server requires a password, fill it in here redis_password = "" # Connect to Redis -r = redis.Redis(host=redis_host, - port=redis_port, - password=redis_password, - decode_responses=True) +r = redis.Redis( + host=redis_host, port=redis_port, password=redis_password, decode_responses=True +) # List of channel names -channels = ['state_0', 'state_1', 'state_2'] +channels = ["state_0", "state_1", "state_2"] + + # Publish messages on each channel in random order def publish_messages(): loop_count = 0 @@ -22,17 +23,23 @@ def publish_messages(): while loop_count < max_loops: random.shuffle(channels) for channel in channels: - r.publish(channel, f'{{"time":{inner_loop_count}, ' \ - f'"x":{inner_loop_count+0.1}, ' \ - f'"y":{inner_loop_count+0.2}}}') - print(f"Published data to {channel}, " \ - f"[{inner_loop_count}, " \ - f"{inner_loop_count+0.1}, " \ - f"{inner_loop_count+0.2}]") + r.publish( + channel, + f'{{"time":{inner_loop_count}, ' + f'"x":{inner_loop_count+0.1}, ' + f'"y":{inner_loop_count+0.2}}}', + ) + print( + f"Published data to {channel}, " + f"[{inner_loop_count}, " + f"{inner_loop_count+0.1}, " + f"{inner_loop_count+0.2}]" + ) inner_loop_count += 1 time.sleep(2) loop_count += 1 print(f"Completed {loop_count} loops") + if __name__ == "__main__": publish_messages() diff --git a/test/onair/data_handling/test_csv_parser.py b/test/onair/data_handling/test_csv_parser.py index 662e6a80..675a86b2 100644 --- a/test/onair/data_handling/test_csv_parser.py +++ b/test/onair/data_handling/test_csv_parser.py @@ -14,19 +14,25 @@ import onair.data_handling.csv_parser as csv_parser from onair.data_handling.csv_parser import DataSource + @pytest.fixture def setup_teardown(): pytest.cut = DataSource.__new__(DataSource) - yield 'setup_teardown' + yield "setup_teardown" + # process_data_per_data_file tests -def test_CSV_process_data_file_sets_sim_data_to_parse_csv_data_return_and_frame_index_to_zero(mocker, 
setup_teardown): +def test_CSV_process_data_file_sets_sim_data_to_parse_csv_data_return_and_frame_index_to_zero( + mocker, setup_teardown +): # Arrange arg_data_file = MagicMock() forced_return_parse_csv_data = MagicMock() - mocker.patch.object(pytest.cut, "parse_csv_data", return_value=forced_return_parse_csv_data) + mocker.patch.object( + pytest.cut, "parse_csv_data", return_value=forced_return_parse_csv_data + ) # Act pytest.cut.process_data_file(arg_data_file) @@ -35,14 +41,17 @@ def test_CSV_process_data_file_sets_sim_data_to_parse_csv_data_return_and_frame_ assert pytest.cut.sim_data == forced_return_parse_csv_data assert pytest.cut.frame_index == 0 + # CSV parse_csv_data tests -def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_empty(mocker, setup_teardown): +def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_empty( + mocker, setup_teardown +): # Arrange arg_dataFile = MagicMock() fake_file_iterator = MagicMock() fake_csv_file = MagicMock() - fake_csv_file.configure_mock(**{'__enter__.return_value': fake_file_iterator}) + fake_csv_file.configure_mock(**{"__enter__.return_value": fake_file_iterator}) fake_dataset = [] forced_return_contains = MagicMock() fake_second_data_set = MagicMock() @@ -51,32 +60,35 @@ def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_empty(mock expected_result = [] - mocker.patch(csv_parser.__name__ + '.open', return_value = fake_csv_file) - mocker.patch(csv_parser.__name__ + '.csv.reader', return_value = fake_dataset) - mocker.patch(csv_parser.__name__ + '.floatify_input') + mocker.patch(csv_parser.__name__ + ".open", return_value=fake_csv_file) + mocker.patch(csv_parser.__name__ + ".csv.reader", return_value=fake_dataset) + mocker.patch(csv_parser.__name__ + ".floatify_input") # Act result = pytest.cut.parse_csv_data(arg_dataFile) # Assert assert csv_parser.open.call_count == 1 - assert csv_parser.open.call_args_list[0].args == (arg_dataFile, 'r') - assert csv_parser.open.call_args_list[0].kwargs == ({'newline':''}) + assert csv_parser.open.call_args_list[0].args == (arg_dataFile, "r") + assert csv_parser.open.call_args_list[0].kwargs == ({"newline": ""}) assert csv_parser.csv.reader.call_count == 1 - assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator, ) - assert csv_parser.csv.reader.call_args_list[0].kwargs == ({'delimiter':','}) + assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator,) + assert csv_parser.csv.reader.call_args_list[0].kwargs == ({"delimiter": ","}) assert csv_parser.floatify_input.call_count == 0 assert result == expected_result -def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_just_headers(mocker, setup_teardown): + +def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_just_headers( + mocker, setup_teardown +): # Arrange arg_dataFile = MagicMock() fake_file_iterator = MagicMock() fake_csv_file = MagicMock() - fake_csv_file.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_dataset = [['fake column header', 'another fake column header']] + fake_csv_file.configure_mock(**{"__enter__.return_value": fake_file_iterator}) + fake_dataset = [["fake column header", "another fake column header"]] forced_return_contains = MagicMock() fake_second_data_set = MagicMock() fake_second_data_set.columns = MagicMock() @@ -84,85 +96,103 @@ def test_CSV_parse_csv_data_returns_empty_list_when_parsed_dataset_is_just_heade expected_result = [] - mocker.patch(csv_parser.__name__ + '.open', return_value = 
fake_csv_file) - mocker.patch(csv_parser.__name__ + '.csv.reader', return_value = fake_dataset) - mocker.patch(csv_parser.__name__ + '.floatify_input') + mocker.patch(csv_parser.__name__ + ".open", return_value=fake_csv_file) + mocker.patch(csv_parser.__name__ + ".csv.reader", return_value=fake_dataset) + mocker.patch(csv_parser.__name__ + ".floatify_input") # Act result = pytest.cut.parse_csv_data(arg_dataFile) # Assert assert csv_parser.open.call_count == 1 - assert csv_parser.open.call_args_list[0].args == (arg_dataFile, 'r') + assert csv_parser.open.call_args_list[0].args == (arg_dataFile, "r") assert csv_parser.csv.reader.call_count == 1 - assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator, ) - assert csv_parser.csv.reader.call_args_list[0].kwargs == ({'delimiter':','}) + assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator,) + assert csv_parser.csv.reader.call_args_list[0].kwargs == ({"delimiter": ","}) assert csv_parser.floatify_input.call_count == 0 assert result == expected_result -def test_CSV_parse_csv_data_returns_list_of_row_values_when_parsed_dataset(mocker, setup_teardown): +def test_CSV_parse_csv_data_returns_list_of_row_values_when_parsed_dataset( + mocker, setup_teardown +): # Arrange arg_dataFile = MagicMock() fake_file_iterator = MagicMock() fake_csv_file = MagicMock() - fake_csv_file.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_dataset = [['fake column header', 'another fake column header']] + fake_csv_file.configure_mock(**{"__enter__.return_value": fake_file_iterator}) + fake_dataset = [["fake column header", "another fake column header"]] expected_result_list = [] - num_fake_rows = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 + num_fake_rows = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 for i in range(num_fake_rows): fake_row_values = [] - for j in range(pytest.gen.randint(1,10)): # arbitrary, from 1 to 10 row values - fake_row_values.append(pytest.gen.randint(1, 10)) # arbitrary, from 1 to 10 as a value in row + for j in range(pytest.gen.randint(1, 10)): # arbitrary, from 1 to 10 row values + fake_row_values.append( + pytest.gen.randint(1, 10) + ) # arbitrary, from 1 to 10 as a value in row fake_dataset.append([i, fake_row_values]) expected_result_list.append(fake_row_values) - mocker.patch(csv_parser.__name__ + '.open', return_value = fake_csv_file) - mocker.patch(csv_parser.__name__ + '.csv.reader', return_value = fake_dataset) - mocker.patch(csv_parser.__name__ + '.floatify_input', side_effect = expected_result_list) + mocker.patch(csv_parser.__name__ + ".open", return_value=fake_csv_file) + mocker.patch(csv_parser.__name__ + ".csv.reader", return_value=fake_dataset) + mocker.patch( + csv_parser.__name__ + ".floatify_input", side_effect=expected_result_list + ) # Act result = pytest.cut.parse_csv_data(arg_dataFile) # Assert assert csv_parser.open.call_count == 1 - assert csv_parser.open.call_args_list[0].args == (arg_dataFile, 'r') + assert csv_parser.open.call_args_list[0].args == (arg_dataFile, "r") assert csv_parser.csv.reader.call_count == 1 - assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator, ) - assert csv_parser.csv.reader.call_args_list[0].kwargs == ({'delimiter':','}) + assert csv_parser.csv.reader.call_args_list[0].args == (fake_file_iterator,) + assert csv_parser.csv.reader.call_args_list[0].kwargs == ({"delimiter": ","}) assert csv_parser.floatify_input.call_count == num_fake_rows assert result == expected_result_list + # CSV 
parse_meta_data tests -def test_CSV_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown(mocker, setup_teardown): +def test_CSV_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown( + mocker, setup_teardown +): # Arrange arg_configFile = MagicMock() arg_ss_breakdown = MagicMock() expected_result = MagicMock() - mocker.patch(csv_parser.__name__ + '.extract_meta_data_handle_ss_breakdown', return_value=expected_result) - mocker.patch(csv_parser.__name__ + '.len') + mocker.patch( + csv_parser.__name__ + ".extract_meta_data_handle_ss_breakdown", + return_value=expected_result, + ) + mocker.patch(csv_parser.__name__ + ".len") # Act result = pytest.cut.parse_meta_data_file(arg_configFile, arg_ss_breakdown) # Assert assert csv_parser.extract_meta_data_handle_ss_breakdown.call_count == 1 - assert csv_parser.extract_meta_data_handle_ss_breakdown.call_args_list[0].args == (arg_configFile, arg_ss_breakdown, ) + assert csv_parser.extract_meta_data_handle_ss_breakdown.call_args_list[0].args == ( + arg_configFile, + arg_ss_breakdown, + ) assert csv_parser.len.call_count == 0 assert result == expected_result + # CSV get_vehicle_metadata tests -def test_CSV_get_vehicle_metadata_returns_list_of_headers_and_list_of_test_assignments(setup_teardown): +def test_CSV_get_vehicle_metadata_returns_list_of_headers_and_list_of_test_assignments( + setup_teardown, +): # Arrange fake_all_headers = MagicMock() fake_test_assignments = MagicMock() fake_binning_configs = {} - fake_binning_configs['test_assignments'] = fake_test_assignments + fake_binning_configs["test_assignments"] = fake_test_assignments expected_result = (fake_all_headers, fake_test_assignments) @@ -175,8 +205,11 @@ def test_CSV_get_vehicle_metadata_returns_list_of_headers_and_list_of_test_assig # Assert assert result == expected_result + # CSV get_next test -def test_CSV_get_next_increments_index_and_returns_current_frame_of_data(setup_teardown): +def test_CSV_get_next_increments_index_and_returns_current_frame_of_data( + setup_teardown, +): # Arrange fake_frame_index = 10 fake_sim_data = [] @@ -195,8 +228,11 @@ def test_CSV_get_next_increments_index_and_returns_current_frame_of_data(setup_t assert result == expected_result assert pytest.cut.frame_index == fake_frame_index + 1 + # CSV has_more test -def test_CSV_has_more_returns_true_when_index_less_than_number_of_frames(setup_teardown): +def test_CSV_has_more_returns_true_when_index_less_than_number_of_frames( + setup_teardown, +): # Arrange fake_frame_index = 10 fake_sim_data = [] @@ -214,7 +250,10 @@ def test_CSV_has_more_returns_true_when_index_less_than_number_of_frames(setup_t # Assert assert result == expected_result -def test_CSV_has_more_returns_false_when_index_equal_than_number_of_frames(setup_teardown): + +def test_CSV_has_more_returns_false_when_index_equal_than_number_of_frames( + setup_teardown, +): # Arrange fake_frame_index = 10 fake_sim_data = [] diff --git a/test/onair/data_handling/test_on_air_data_source.py b/test/onair/data_handling/test_on_air_data_source.py index 4c705621..1c396b8d 100644 --- a/test/onair/data_handling/test_on_air_data_source.py +++ b/test/onair/data_handling/test_on_air_data_source.py @@ -28,9 +28,11 @@ def get_next(self): def has_more(self): return super().has_more() + class IncompleteOnAirDataSource(OnAirDataSource): pass + class BadFakeOnAirDataSource(OnAirDataSource): def process_data_file(self, data_file): return super().process_data_file(data_file) @@ -44,26 +46,30 @@ def get_next(self): def has_more(self): return 
super().has_more() + @pytest.fixture def setup_teardown(): pytest.cut = FakeOnAirDataSource.__new__(FakeOnAirDataSource) - yield 'setup_teardown' + yield "setup_teardown" + # __init__ tests -def test_OnAirDataSource__init__sets_instance_variables_as_expected_and_calls_parse_meta_data_file_and_process_data_file(setup_teardown, mocker): +def test_OnAirDataSource__init__sets_instance_variables_as_expected_and_calls_parse_meta_data_file_and_process_data_file( + setup_teardown, mocker +): # Arrange arg_rawDataFile = MagicMock() arg_metadataFile = MagicMock() arg_ss_breakdown = MagicMock() fake_configs = {} - fake_configs['subsystem_assignments'] = MagicMock() - fake_configs['test_assignments'] = MagicMock() - fake_configs['description_assignments'] = MagicMock() - fake_configs['data_labels'] = MagicMock() + fake_configs["subsystem_assignments"] = MagicMock() + fake_configs["test_assignments"] = MagicMock() + fake_configs["description_assignments"] = MagicMock() + fake_configs["data_labels"] = MagicMock() - mocker.patch.object(pytest.cut, 'parse_meta_data_file', return_value=fake_configs) - mocker.patch.object(pytest.cut, 'process_data_file') + mocker.patch.object(pytest.cut, "parse_meta_data_file", return_value=fake_configs) + mocker.patch.object(pytest.cut, "process_data_file") # Act pytest.cut.__init__(arg_rawDataFile, arg_metadataFile, arg_ss_breakdown) @@ -71,16 +77,29 @@ def test_OnAirDataSource__init__sets_instance_variables_as_expected_and_calls_pa # Assert assert pytest.cut.raw_data_file == arg_rawDataFile assert pytest.cut.meta_data_file == arg_metadataFile - assert pytest.cut.all_headers == fake_configs['data_labels'] + assert pytest.cut.all_headers == fake_configs["data_labels"] assert pytest.cut.sim_data == {} assert pytest.cut.parse_meta_data_file.call_count == 1 - assert pytest.cut.parse_meta_data_file.call_args_list[0].args == (arg_metadataFile, arg_ss_breakdown, ) + assert pytest.cut.parse_meta_data_file.call_args_list[0].args == ( + arg_metadataFile, + arg_ss_breakdown, + ) assert pytest.cut.process_data_file.call_count == 1 - assert pytest.cut.process_data_file.call_args_list[0].args == (arg_rawDataFile, ) + assert pytest.cut.process_data_file.call_args_list[0].args == (arg_rawDataFile,) # assert pytest.cut.binning_configs == fake_configs - assert pytest.cut.binning_configs['subsystem_assignments'] == fake_configs['subsystem_assignments'] - assert pytest.cut.binning_configs['test_assignments'] == fake_configs['test_assignments'] - assert pytest.cut.binning_configs['description_assignments'] == fake_configs['description_assignments'] + assert ( + pytest.cut.binning_configs["subsystem_assignments"] + == fake_configs["subsystem_assignments"] + ) + assert ( + pytest.cut.binning_configs["test_assignments"] + == fake_configs["test_assignments"] + ) + assert ( + pytest.cut.binning_configs["description_assignments"] + == fake_configs["description_assignments"] + ) + # abstract methods tests def test_OnAirDataSource_raises_error_because_of_unimplemented_abstract_methods(): @@ -96,6 +115,7 @@ def test_OnAirDataSource_raises_error_because_of_unimplemented_abstract_methods( assert "get_next" in e_info.__str__() assert "has_more" in e_info.__str__() + # Incomplete plugin call tests def test_OnAirDataSource_raises_error_when_an_inherited_class_is_instantiated_because_abstract_methods_are_not_implemented_by_that_class(): # Arrange - None @@ -104,12 +124,16 @@ def test_OnAirDataSource_raises_error_when_an_inherited_class_is_instantiated_be cut = 
IncompleteOnAirDataSource.__new__(IncompleteOnAirDataSource) # Assert - assert "Can't instantiate abstract class IncompleteOnAirDataSource with" in e_info.__str__() + assert ( + "Can't instantiate abstract class IncompleteOnAirDataSource with" + in e_info.__str__() + ) assert "process_data_file" in e_info.__str__() assert "parse_meta_data_file" in e_info.__str__() assert "get_next" in e_info.__str__() assert "has_more" in e_info.__str__() + def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_method_process_data_file(): # Act cut = BadFakeOnAirDataSource.__new__(BadFakeOnAirDataSource) @@ -119,6 +143,7 @@ def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_met cut.process_data_file(None) assert "NotImplementedError" in e_info.__str__() + def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_method_parse_meta_data_file(): # Act cut = BadFakeOnAirDataSource.__new__(BadFakeOnAirDataSource) @@ -128,6 +153,7 @@ def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_met cut.parse_meta_data_file(None, None) assert "NotImplementedError" in e_info.__str__() + def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_method_get_next(): # Act cut = BadFakeOnAirDataSource.__new__(BadFakeOnAirDataSource) @@ -137,6 +163,7 @@ def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_met cut.get_next() assert "NotImplementedError" in e_info.__str__() + def test_OnAirDataSource_raises_error_when_an_inherited_class_calls_abstract_method_has_more(): # Act cut = BadFakeOnAirDataSource.__new__(BadFakeOnAirDataSource) diff --git a/test/onair/data_handling/test_parser_util.py b/test/onair/data_handling/test_parser_util.py index 1fbfa5c9..6da61c6a 100644 --- a/test/onair/data_handling/test_parser_util.py +++ b/test/onair/data_handling/test_parser_util.py @@ -13,27 +13,37 @@ import onair.data_handling.parser_util as parser_util + # extract_meta_data_handle_ss_breakdown -def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_file_given_metadata_file_and_csv_set_to_True_when_given_ss_breakdown_does_not_resolve_to_False(mocker): +def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_file_given_metadata_file_and_csv_set_to_True_when_given_ss_breakdown_does_not_resolve_to_False( + mocker, +): # Arrange arg_configFile = MagicMock() arg_ss_breakdown = True if pytest.gen.randint(0, 1) else MagicMock() expected_result = MagicMock() - mocker.patch(parser_util.__name__ + '.extract_meta_data', return_value=expected_result) - mocker.patch(parser_util.__name__ + '.len') + mocker.patch( + parser_util.__name__ + ".extract_meta_data", return_value=expected_result + ) + mocker.patch(parser_util.__name__ + ".len") # Act - result = parser_util.extract_meta_data_handle_ss_breakdown(arg_configFile, arg_ss_breakdown) + result = parser_util.extract_meta_data_handle_ss_breakdown( + arg_configFile, arg_ss_breakdown + ) # Assert assert parser_util.extract_meta_data.call_count == 1 - assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile, ) + assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile,) assert parser_util.len.call_count == 0 assert result == expected_result -def 
test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_file_given_metadata_file_and_csv_set_to_True_with_dict_def_of_subsystem_assigments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_set_to_empty_list_when_len_of_call_value_dict_def_of_subsystem_assigments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_is_0_when_given_ss_breakdown_evaluates_to_False(mocker): + +def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_file_given_metadata_file_and_csv_set_to_True_with_dict_def_of_subsystem_assigments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_set_to_empty_list_when_len_of_call_value_dict_def_of_subsystem_assigments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_is_0_when_given_ss_breakdown_evaluates_to_False( + mocker, +): # Arrange arg_configFile = MagicMock() arg_ss_breakdown = False if pytest.gen.randint(0, 1) else 0 @@ -41,24 +51,34 @@ def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extra forced_return_extract_meta_data = {} forced_return_len = 0 fake_empty_processed_filepath = MagicMock() - forced_return_extract_meta_data['subsystem_assignments'] = fake_empty_processed_filepath + forced_return_extract_meta_data["subsystem_assignments"] = ( + fake_empty_processed_filepath + ) expected_result = [] - mocker.patch(parser_util.__name__ + '.extract_meta_data', return_value=forced_return_extract_meta_data) - mocker.patch(parser_util.__name__ + '.len', return_value=forced_return_len) + mocker.patch( + parser_util.__name__ + ".extract_meta_data", + return_value=forced_return_extract_meta_data, + ) + mocker.patch(parser_util.__name__ + ".len", return_value=forced_return_len) # Act - result = parser_util.extract_meta_data_handle_ss_breakdown(arg_configFile, arg_ss_breakdown) + result = parser_util.extract_meta_data_handle_ss_breakdown( + arg_configFile, arg_ss_breakdown + ) # Assert assert parser_util.extract_meta_data.call_count == 1 - assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile, ) + assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile,) assert parser_util.len.call_count == 1 - assert parser_util.len.call_args_list[0].args == (fake_empty_processed_filepath, ) - assert result['subsystem_assignments'] == expected_result + assert parser_util.len.call_args_list[0].args == (fake_empty_processed_filepath,) + assert result["subsystem_assignments"] == expected_result -def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_given_metadata_file_and_csv_set_to_True_with_dict_def_subsystem_assignments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_set_to_single_item_list_str_MISSION_for_each_item_when_given_ss_breakdown_evaluates_to_False(mocker): + +def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extract_meta_data_given_metadata_file_and_csv_set_to_True_with_dict_def_subsystem_assignments_def_of_call_to_process_filepath_given_configFile_and_kwarg_csv_set_to_True_set_to_single_item_list_str_MISSION_for_each_item_when_given_ss_breakdown_evaluates_to_False( + mocker, +): # Arrange arg_configFile = MagicMock() arg_ss_breakdown = False if pytest.gen.randint(0, 1) else 0 @@ -66,42 +86,53 @@ def test_parser_util_extract_meta_data_handle_ss_breakdown_returns_call_to_extra forced_return_extract_meta_data = {} forced_return_process_filepath = MagicMock() 
fake_processed_filepath = [] - num_fake_processed_filepaths = pytest.gen.randint(1,10) # arbitrary, from 1 to 10 (0 has own test) + num_fake_processed_filepaths = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 has own test) for i in range(num_fake_processed_filepaths): fake_processed_filepath.append(i) - forced_return_extract_meta_data['subsystem_assignments'] = fake_processed_filepath + forced_return_extract_meta_data["subsystem_assignments"] = fake_processed_filepath forced_return_len = num_fake_processed_filepaths expected_result = [] for i in range(num_fake_processed_filepaths): - expected_result.append(['MISSION']) + expected_result.append(["MISSION"]) - mocker.patch(parser_util.__name__ + '.extract_meta_data', return_value=forced_return_extract_meta_data) - mocker.patch(parser_util.__name__ + '.len', return_value=forced_return_len) + mocker.patch( + parser_util.__name__ + ".extract_meta_data", + return_value=forced_return_extract_meta_data, + ) + mocker.patch(parser_util.__name__ + ".len", return_value=forced_return_len) # Act - result = parser_util.extract_meta_data_handle_ss_breakdown(arg_configFile, arg_ss_breakdown) + result = parser_util.extract_meta_data_handle_ss_breakdown( + arg_configFile, arg_ss_breakdown + ) # Assert assert parser_util.extract_meta_data.call_count == 1 - assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile, ) + assert parser_util.extract_meta_data.call_args_list[0].args == (arg_configFile,) assert parser_util.len.call_count == 1 - assert parser_util.len.call_args_list[0].args == (fake_processed_filepath, ) - assert result['subsystem_assignments'] == expected_result + assert parser_util.len.call_args_list[0].args == (fake_processed_filepath,) + assert result["subsystem_assignments"] == expected_result + # extract_meta_data tests def test_parser_util_extract_meta_data_raises_error_when_given_blank_meta_data_file(): # Arrange - arg_meta_data_file = '' + arg_meta_data_file = "" # Act with pytest.raises(AssertionError) as e_info: result = parser_util.extract_meta_data(arg_meta_data_file) # Assert - assert e_info.match('') + assert e_info.match("") -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_len_equal_to_zero(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_len_equal_to_zero( + mocker, +): # Arrange arg_meta_data_file = MagicMock() @@ -109,27 +140,34 @@ def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_ fake_tests = MagicMock() fake_descs = MagicMock() - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } forced_return_len = 0 - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - mocker.patch(parser_util.__name__ + '.len', return_value=forced_return_len) - mocker.patch(parser_util.__name__ + '.str2lst') + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".len", return_value=forced_return_len) + mocker.patch(parser_util.__name__ + ".str2lst") # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert 
parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert parser_util.len.call_count == 1 - assert parser_util.len.call_args_list[0].args == (fake_subsystem_assignments, ) + assert parser_util.len.call_args_list[0].args == (fake_subsystem_assignments,) assert parser_util.str2lst.call_count == 0 assert result == forced_return_parse_tlm -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_len_equal_to_one(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_len_equal_to_one( + mocker, +): # Arrange arg_meta_data_file = MagicMock() @@ -138,121 +176,164 @@ def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configs_ fake_tests = [[[fake_test_assign]]] fake_descs = [MagicMock()] - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - mocker.patch(parser_util.__name__ + '.str2lst') + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".str2lst") - expected_ss_assigns = [[fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments] + expected_ss_assigns = [ + [fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments + ] expected_result = {} - expected_result['subsystem_assignments'] = expected_ss_assigns - expected_result['test_assignments'] = [[[fake_test_assign]]] - expected_result['description_assignments'] = fake_descs.copy() + expected_result["subsystem_assignments"] = expected_ss_assigns + expected_result["test_assignments"] = [[[fake_test_assign]]] + expected_result["description_assignments"] = fake_descs.copy() # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert parser_util.str2lst.call_count == 0 assert result == expected_result -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one( + mocker, +): # Arrange arg_meta_data_file = MagicMock() - len_configs = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10 (0 and 1 have own tests) + len_configs = pytest.gen.randint( + 2, 10 + ) # arbitrary, from 2 to 10 (0 and 1 have own tests) fake_subsystem_assignments = [MagicMock()] * len_configs fake_test_assign = MagicMock() fake_tests = [[[fake_test_assign]]] * len_configs fake_descs = [MagicMock()] * len_configs - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - 
mocker.patch(parser_util.__name__ + '.str2lst') + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".str2lst") - expected_ss_assigns = [[fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments] + expected_ss_assigns = [ + [fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments + ] expected_result = {} - expected_result['subsystem_assignments'] = expected_ss_assigns - expected_result['test_assignments'] = [[[fake_test_assign]]] * len_configs - expected_result['description_assignments'] = fake_descs.copy() + expected_result["subsystem_assignments"] = expected_ss_assigns + expected_result["test_assignments"] = [[[fake_test_assign]]] * len_configs + expected_result["description_assignments"] = fake_descs.copy() # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert parser_util.str2lst.call_count == 0 assert result == expected_result -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one_and_NOOPs_contained_in_test_assigns(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one_and_NOOPs_contained_in_test_assigns( + mocker, +): # Arrange arg_meta_data_file = MagicMock() - len_configs = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10 (0 and 1 have own tests) + len_configs = pytest.gen.randint( + 2, 10 + ) # arbitrary, from 2 to 10 (0 and 1 have own tests) num_noops = pytest.gen.randint(2, 10) len_configs = len_configs + num_noops fake_subsystem_assignments = [MagicMock()] * len_configs fake_test_assign = MagicMock() - noop_test_assign = 'NOOP' - fake_tests = [[[fake_test_assign]]] * (len_configs - num_noops) + [[[noop_test_assign]]] * num_noops + noop_test_assign = "NOOP" + fake_tests = [[[fake_test_assign]]] * (len_configs - num_noops) + [ + [[noop_test_assign]] + ] * num_noops fake_descs = [MagicMock()] * len_configs - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - mocker.patch(parser_util.__name__ + '.str2lst') + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".str2lst") - expected_ss_assigns = [[fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments] + expected_ss_assigns = [ + [fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments + ] expected_result = {} - expected_result['subsystem_assignments'] = expected_ss_assigns - expected_result['test_assignments'] = [[[fake_test_assign]]] * (len_configs - num_noops) + [[[noop_test_assign]]] * num_noops - expected_result['description_assignments'] = fake_descs.copy() + expected_result["subsystem_assignments"] = expected_ss_assigns + expected_result["test_assignments"] = [[[fake_test_assign]]] * ( + len_configs - num_noops + ) + [[[noop_test_assign]]] * num_noops + expected_result["description_assignments"] = 
fake_descs.copy() # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert parser_util.str2lst.call_count == 0 assert result == expected_result -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one_and_len_test_assigns_greater_than_one(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_configs_greater_than_one_and_len_test_assigns_greater_than_one( + mocker, +): # Arrange arg_meta_data_file = MagicMock() - len_configs = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10 (0 and 1 have own tests) + len_configs = pytest.gen.randint( + 2, 10 + ) # arbitrary, from 2 to 10 (0 and 1 have own tests) fake_subsystem_assignments = [MagicMock()] * len_configs - fake_tests= [] + fake_tests = [] fake_descs = [MagicMock()] * len_configs for i in range(len_configs): - len_test_assigns = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 + len_test_assigns = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 fake_test_assigns = [[MagicMock(), MagicMock()]] * len_test_assigns fake_tests.append(fake_test_assigns) - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } forced_return_str2lst = [MagicMock()] - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - mocker.patch(parser_util.__name__ + '.str2lst', return_value=forced_return_str2lst) + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".str2lst", return_value=forced_return_str2lst) - expected_ss_assigns = [[fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments] + expected_ss_assigns = [ + [fake_ss_assign] for fake_ss_assign in fake_subsystem_assignments + ] expected_result = {} - expected_result['subsystem_assignments'] = expected_ss_assigns - expected_result['test_assignments'] = [] - expected_result['description_assignments'] = fake_descs.copy() + expected_result["subsystem_assignments"] = expected_ss_assigns + expected_result["test_assignments"] = [] + expected_result["description_assignments"] = fake_descs.copy() expected_str2lst_args = [] expected_str2lst_call_count = 0 @@ -262,24 +343,29 @@ def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_len_conf expected_test_assign.append([test_assigns[j][0]] + forced_return_str2lst) expected_str2lst_args.append(test_assigns[j][1]) expected_str2lst_call_count += 1 - expected_result['test_assignments'].append(expected_test_assign) + expected_result["test_assignments"].append(expected_test_assign) # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert parser_util.str2lst.call_count == expected_str2lst_call_count for i in range(expected_str2lst_call_count): - assert parser_util.str2lst.call_args_list[i].args 
== (expected_str2lst_args[i], ) + assert parser_util.str2lst.call_args_list[i].args == (expected_str2lst_args[i],) assert result == expected_result -def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configFiles_len_configs_greater_than_one_and_subsystem_NONE_exists(mocker): + +def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configFiles_len_configs_greater_than_one_and_subsystem_NONE_exists( + mocker, +): # Arrange arg_meta_data_file = MagicMock() - len_configs = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10 (0 and 1 have own tests) + len_configs = pytest.gen.randint( + 2, 10 + ) # arbitrary, from 2 to 10 (0 and 1 have own tests) fake_subsystem_assignments = [] fake_tests = [] fake_tests_copy = [] @@ -292,41 +378,50 @@ def test_parser_util_extract_meta_data_returns_expected_dicts_dict_when_configFi fake_subsystem_assignments.append(fake_ss_assign) expected_subsystem_assignments.append([fake_ss_assign]) else: - fake_subsystem_assignments.append('NONE') + fake_subsystem_assignments.append("NONE") expected_subsystem_assignments.append([]) fake_test_assign = MagicMock() fake_tests.append([[fake_test_assign]]) fake_tests_copy.append([[fake_test_assign]]) fake_descs.append(MagicMock()) - rand_index = pytest.gen.randint(0, len_configs-1) # arbitrary index in fake_subsystem_assignments - fake_subsystem_assignments[rand_index] = 'NONE' + rand_index = pytest.gen.randint( + 0, len_configs - 1 + ) # arbitrary index in fake_subsystem_assignments + fake_subsystem_assignments[rand_index] = "NONE" expected_subsystem_assignments[rand_index] = [] - forced_return_parse_tlm = {'subsystem_assignments' : fake_subsystem_assignments, - 'test_assignments' : fake_tests, - 'description_assignments' : fake_descs} + forced_return_parse_tlm = { + "subsystem_assignments": fake_subsystem_assignments, + "test_assignments": fake_tests, + "description_assignments": fake_descs, + } - mocker.patch(parser_util.__name__ + '.parseTlmConfJson', return_value=forced_return_parse_tlm) - mocker.patch(parser_util.__name__ + '.str2lst') + mocker.patch( + parser_util.__name__ + ".parseTlmConfJson", return_value=forced_return_parse_tlm + ) + mocker.patch(parser_util.__name__ + ".str2lst") expected_result = {} - expected_result['subsystem_assignments'] = expected_subsystem_assignments - expected_result['test_assignments'] = fake_tests_copy - expected_result['description_assignments'] = fake_descs.copy() + expected_result["subsystem_assignments"] = expected_subsystem_assignments + expected_result["test_assignments"] = fake_tests_copy + expected_result["description_assignments"] = fake_descs.copy() # Act result = parser_util.extract_meta_data(arg_meta_data_file) # Assert assert parser_util.parseTlmConfJson.call_count == 1 - assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file, ) + assert parser_util.parseTlmConfJson.call_args_list[0].args == (arg_meta_data_file,) assert result == expected_result + # floatify_input tests -def test_parser_util_flotify_input_returns_empty_list_when_given__input_is_vacant(mocker): +def test_parser_util_flotify_input_returns_empty_list_when_given__input_is_vacant( + mocker, +): # Arrange - arg__input = [] # empty list, no iterations + arg__input = [] # empty list, no iterations arg_remove_str = False # Act @@ -335,15 +430,18 @@ def test_parser_util_flotify_input_returns_empty_list_when_given__input_is_vacan # Assert assert result == [] -def 
test_parser_util_flotify_input_raises_exception_when_float_returns_non_ValueError_exception(mocker): + +def test_parser_util_flotify_input_raises_exception_when_float_returns_non_ValueError_exception( + mocker, +): # Arrange - arg__input = [str(MagicMock())] # list of single str list, 1 iteration + arg__input = [str(MagicMock())] # list of single str list, 1 iteration arg_remove_str = False exception_message = str(MagicMock()) fake_exception = Exception(exception_message) - mocker.patch('builtins.float', side_effect=[fake_exception]) + mocker.patch("builtins.float", side_effect=[fake_exception]) # Act with pytest.raises(Exception) as e_info: @@ -352,125 +450,156 @@ def test_parser_util_flotify_input_raises_exception_when_float_returns_non_Value # Assert assert e_info.match(exception_message) -def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_the_call_to_float_when_no_Exception_is_thrown_and_given__input_is_str(mocker): + +def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_the_call_to_float_when_no_Exception_is_thrown_and_given__input_is_str( + mocker, +): # Arrange arg__input = [] arg_remove_str = False fake_item = str(MagicMock()) - arg__input.append(fake_item) # list of single str, one iteration + arg__input.append(fake_item) # list of single str, one iteration expected_result = MagicMock() - mocker.patch('builtins.float', return_value=expected_result) + mocker.patch("builtins.float", return_value=expected_result) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) # Assert assert float.call_count == 1 - assert float.call_args_list[0].args == (arg__input[0], ) + assert float.call_args_list[0].args == (arg__input[0],) assert result == [expected_result] -def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_the_second_call_to_float_after_replace_call_when_single_Exception_is_thrown(mocker): + +def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_the_second_call_to_float_after_replace_call_when_single_Exception_is_thrown( + mocker, +): # Arrange arg__input = [] arg_remove_str = False fake_item = MagicMock() - arg__input.append(fake_item) # list of one item, one iteration + arg__input.append(fake_item) # list of one item, one iteration expected_result = MagicMock() - mocker.patch(parser_util.__name__ + '.float', side_effect=[ValueError]) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', return_value=expected_result) + mocker.patch(parser_util.__name__ + ".float", side_effect=[ValueError]) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", return_value=expected_result + ) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) # Assert assert parser_util.float.call_count == 1 - assert parser_util.float.call_args_list[0].args == (arg__input[0], ) + assert parser_util.float.call_args_list[0].args == (arg__input[0],) assert parser_util.convert_str_to_timestamp.call_count == 1 - assert parser_util.convert_str_to_timestamp.call_args_list[0].args == (fake_item, ) + assert parser_util.convert_str_to_timestamp.call_args_list[0].args == (fake_item,) assert result == [expected_result] -def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_0_dot_0_when_two_Exceptions_are_thrown_and_remove_str_is_False(mocker): + +def test_parser_util_flotify_input_returns_list_of_size_one_that_contains_0_dot_0_when_two_Exceptions_are_thrown_and_remove_str_is_False( + mocker, +): # Arrange arg__input = [] arg_remove_str = False fake_item = 
MagicMock() - arg__input.append(fake_item) # list of one item, one iteration + arg__input.append(fake_item) # list of one item, one iteration - mocker.patch(parser_util.__name__ + '.float', side_effect=[ValueError]) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', side_effect=[Exception]) + mocker.patch(parser_util.__name__ + ".float", side_effect=[ValueError]) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", side_effect=[Exception] + ) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) # Assert assert parser_util.float.call_count == 1 - assert parser_util.float.call_args_list[0].args == (arg__input[0], ) + assert parser_util.float.call_args_list[0].args == (arg__input[0],) assert parser_util.convert_str_to_timestamp.call_count == 1 - assert parser_util.convert_str_to_timestamp.call_args_list[0].args == (arg__input[0], ) + assert parser_util.convert_str_to_timestamp.call_args_list[0].args == ( + arg__input[0], + ) assert result == [0.0] + def test_parser_util_flotify_input_default_arg_remove_str_is_False(mocker): # Arrange arg__input = [] fake_item = MagicMock() - arg__input.append(fake_item) # list of one item, one iteration + arg__input.append(fake_item) # list of one item, one iteration - mocker.patch(parser_util.__name__ + '.float', side_effect=[ValueError]) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', side_effect=[Exception]) + mocker.patch(parser_util.__name__ + ".float", side_effect=[ValueError]) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", side_effect=[Exception] + ) # Act result = parser_util.floatify_input(arg__input) # Assert - assert result == [0.0] # shows flow was correct for remove_str being False + assert result == [0.0] # shows flow was correct for remove_str being False + -def test_parser_util_flotify_input_returns_empty_list_when_two_Exceptions_are_thrown_and_remove_str_is_True(mocker): +def test_parser_util_flotify_input_returns_empty_list_when_two_Exceptions_are_thrown_and_remove_str_is_True( + mocker, +): # Arrange arg__input = [] arg_remove_str = True fake_item = MagicMock() - arg__input.append(fake_item) # list of one item, one iteration + arg__input.append(fake_item) # list of one item, one iteration - mocker.patch(parser_util.__name__ + '.float', side_effect=[ValueError]) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', side_effect=[Exception]) + mocker.patch(parser_util.__name__ + ".float", side_effect=[ValueError]) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", side_effect=[Exception] + ) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) # Assert assert parser_util.float.call_count == 1 - assert parser_util.float.call_args_list[0].args == (arg__input[0], ) + assert parser_util.float.call_args_list[0].args == (arg__input[0],) assert parser_util.convert_str_to_timestamp.call_count == 1 - assert parser_util.convert_str_to_timestamp.call_args_list[0].args == (fake_item, ) + assert parser_util.convert_str_to_timestamp.call_args_list[0].args == (fake_item,) assert result == [] -def test_parser_util_flotify_input_returns_call_to_float_that_was_given___input_item_when_type_of_item_is_not_str_and_there_is_single_item(mocker): + +def test_parser_util_flotify_input_returns_call_to_float_that_was_given___input_item_when_type_of_item_is_not_str_and_there_is_single_item( + mocker, +): # Arrange arg__input = [] fake_item = MagicMock() - arg__input.append(fake_item) # list of one item, one iteration + 
arg__input.append(fake_item) # list of one item, one iteration expected_result = MagicMock() - mocker.patch(parser_util.__name__ + '.float', return_value=expected_result) + mocker.patch(parser_util.__name__ + ".float", return_value=expected_result) # Act result = parser_util.floatify_input(arg__input) # Assert - assert result == [expected_result] # shows flow was correct for remove_str being False + assert result == [ + expected_result + ] # shows flow was correct for remove_str being False -def test_parser_util_flotify_input_returns_expected_values_for_given__input_that_is_multi_typed_when_remove_str_is_True(mocker): + +def test_parser_util_flotify_input_returns_expected_values_for_given__input_that_is_multi_typed_when_remove_str_is_True( + mocker, +): # Arrange arg__input = [] arg_remove_str = True @@ -479,24 +608,26 @@ def test_parser_util_flotify_input_returns_expected_values_for_given__input_that side_effects_for_convert_str = [] expected_result = [] - num_fakes = pytest.gen.randint(0, 10) # arbitrary, from 0 to 10 + num_fakes = pytest.gen.randint(0, 10) # arbitrary, from 0 to 10 for i in range(num_fakes): - rand_type_of_item = pytest.gen.sample(['str', 'str_need_replace', 'str_fail_replace', 'other'], 1)[0] + rand_type_of_item = pytest.gen.sample( + ["str", "str_need_replace", "str_fail_replace", "other"], 1 + )[0] - if rand_type_of_item == 'str': + if rand_type_of_item == "str": arg__input.append(MagicMock()) resultant_float = MagicMock() side_effects_for_float.append(resultant_float) expected_result.append(resultant_float) - elif rand_type_of_item == 'str_need_replace': + elif rand_type_of_item == "str_need_replace": fake_input = MagicMock() arg__input.append(fake_input) resultant_float = MagicMock() side_effects_for_float.append(ValueError) side_effects_for_convert_str.append(resultant_float) expected_result.append(resultant_float) - elif rand_type_of_item == 'str_fail_replace': + elif rand_type_of_item == "str_fail_replace": fake_input = MagicMock() arg__input.append(fake_input) resultant_float = MagicMock() @@ -508,8 +639,11 @@ def test_parser_util_flotify_input_returns_expected_values_for_given__input_that side_effects_for_float.append(resultant_float) expected_result.append(resultant_float) - mocker.patch(parser_util.__name__ + '.float', side_effect=side_effects_for_float) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', side_effect=side_effects_for_convert_str) + mocker.patch(parser_util.__name__ + ".float", side_effect=side_effects_for_float) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", + side_effect=side_effects_for_convert_str, + ) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) @@ -517,7 +651,10 @@ def test_parser_util_flotify_input_returns_expected_values_for_given__input_that # Assert assert result == expected_result -def test_parser_util_flotify_input_returns_expected_values_for_given__input_that_is_multi_typed_when_remove_str_is_False(mocker): + +def test_parser_util_flotify_input_returns_expected_values_for_given__input_that_is_multi_typed_when_remove_str_is_False( + mocker, +): # Arrange arg__input = [] arg_remove_str = False @@ -526,38 +663,43 @@ def test_parser_util_flotify_input_returns_expected_values_for_given__input_that side_effects_for_convert_str = [] expected_result = [] - num_fakes = pytest.gen.randint(0, 10) # arbitrary, from 0 to 10 + num_fakes = pytest.gen.randint(0, 10) # arbitrary, from 0 to 10 for i in range(num_fakes): - rand_type_of_item = pytest.gen.sample(['str', 
'str_need_replace', 'str_fail_replace', 'other'], 1)[0] + rand_type_of_item = pytest.gen.sample( + ["str", "str_need_replace", "str_fail_replace", "other"], 1 + )[0] - if rand_type_of_item == 'str': + if rand_type_of_item == "str": arg__input.append(MagicMock()) resultant_float = MagicMock() side_effects_for_float.append(resultant_float) expected_result.append(resultant_float) - elif rand_type_of_item == 'str_need_replace': + elif rand_type_of_item == "str_need_replace": fake_input = MagicMock() arg__input.append(fake_input) resultant_float = MagicMock() side_effects_for_float.append(ValueError) side_effects_for_convert_str.append(resultant_float) expected_result.append(resultant_float) - elif rand_type_of_item == 'str_fail_replace': + elif rand_type_of_item == "str_fail_replace": fake_input = MagicMock() arg__input.append(fake_input) resultant_float = MagicMock() side_effects_for_float.append(ValueError) side_effects_for_convert_str.append(Exception) expected_result.append(0.0) - else: # other + else: # other arg__input.append(MagicMock()) resultant_float = MagicMock() side_effects_for_float.append(resultant_float) expected_result.append(resultant_float) - mocker.patch(parser_util.__name__ + '.float', side_effect=side_effects_for_float) - mocker.patch(parser_util.__name__ + '.convert_str_to_timestamp', side_effect=side_effects_for_convert_str) + mocker.patch(parser_util.__name__ + ".float", side_effect=side_effects_for_float) + mocker.patch( + parser_util.__name__ + ".convert_str_to_timestamp", + side_effect=side_effects_for_convert_str, + ) # Act result = parser_util.floatify_input(arg__input, arg_remove_str) @@ -565,8 +707,11 @@ def test_parser_util_flotify_input_returns_expected_values_for_given__input_that # Assert assert result == expected_result + # convert_str_to_timestamp -def test_parser_util_convert_str_to_timestamp_returns_datetime_strptime_timestamp_on_success(mocker): +def test_parser_util_convert_str_to_timestamp_returns_datetime_strptime_timestamp_on_success( + mocker, +): # Arrange arg_time_str = str(MagicMock()) @@ -575,28 +720,35 @@ def test_parser_util_convert_str_to_timestamp_returns_datetime_strptime_timestam fake_dt_module = MagicMock() fake_dt_dt = MagicMock() - mocker.patch(parser_util.__name__ + '.datetime', fake_dt_module) - mocker.patch.object(fake_dt_module, 'datetime', fake_dt_dt) - mocker.patch.object(fake_dt_dt, 'strptime', return_value=fake_datetime) - mocker.patch.object(fake_datetime, 'timestamp', return_value=fake_timestamp) + mocker.patch(parser_util.__name__ + ".datetime", fake_dt_module) + mocker.patch.object(fake_dt_module, "datetime", fake_dt_dt) + mocker.patch.object(fake_dt_dt, "strptime", return_value=fake_datetime) + mocker.patch.object(fake_datetime, "timestamp", return_value=fake_timestamp) # Act result = parser_util.convert_str_to_timestamp(arg_time_str) # Assert assert fake_dt_module.datetime.strptime.call_count == 1 - assert fake_dt_module.datetime.strptime.call_args_list[0].args == (arg_time_str, '%Y-%j-%H:%M:%S.%f') + assert fake_dt_module.datetime.strptime.call_args_list[0].args == ( + arg_time_str, + "%Y-%j-%H:%M:%S.%f", + ) assert fake_datetime.timestamp.call_count == 1 assert result == fake_timestamp -def test_parser_util_convert_str_to_timestamp_returns_datetime_timestamp_when_strptime_raises_error(mocker): + +def test_parser_util_convert_str_to_timestamp_returns_datetime_timestamp_when_strptime_raises_error( + mocker, +): # Arrange - arg_time_str = '59:20' + arg_time_str = "59:20" fake_timestamp = MagicMock() fake_dt_module = 
MagicMock() - class Fake_Datetime(): + class Fake_Datetime: timestamp_call_count = 0 + def __init__(self, year, month, day, hour, minute, second, subsecond): assert year == 2000 assert month == 1 @@ -613,8 +765,8 @@ def timestamp(self): Fake_Datetime.timestamp_call_count = self.timestamp_call_count + 1 return fake_timestamp - mocker.patch(parser_util.__name__ + '.datetime', fake_dt_module) - mocker.patch.object(fake_dt_module, 'datetime', Fake_Datetime) + mocker.patch(parser_util.__name__ + ".datetime", fake_dt_module) + mocker.patch.object(fake_dt_module, "datetime", Fake_Datetime) # Act result = parser_util.convert_str_to_timestamp(arg_time_str) @@ -623,7 +775,10 @@ def timestamp(self): assert Fake_Datetime.timestamp_call_count == 1 assert result == fake_timestamp -def test_parser_util_convert_str_to_timestamp_raises_error_when_both_strptime_and_datetime_raise_errors(mocker): + +def test_parser_util_convert_str_to_timestamp_raises_error_when_both_strptime_and_datetime_raise_errors( + mocker, +): # Arrange arg_time_str = str(MagicMock()) @@ -632,7 +787,7 @@ def test_parser_util_convert_str_to_timestamp_raises_error_when_both_strptime_an fake_dt_module = MagicMock() fake_dt_dt = MagicMock() - class Fake_Datetime(): + class Fake_Datetime: def __init__(self, year, month, day, hour, minute, second, subsecond): raise Exception @@ -642,12 +797,12 @@ def strptime(arg1, arg2): def timestamp(): assert False - mocker.patch(parser_util.__name__ + '.datetime', fake_dt_module) - mocker.patch.object(fake_dt_module, 'datetime', Fake_Datetime) + mocker.patch(parser_util.__name__ + ".datetime", fake_dt_module) + mocker.patch.object(fake_dt_module, "datetime", Fake_Datetime) # Act with pytest.raises(Exception) as e_info: parser_util.convert_str_to_timestamp(arg_time_str) # Assert - assert e_info.match('') + assert e_info.match("") diff --git a/test/onair/data_handling/test_redis_adapter.py b/test/onair/data_handling/test_redis_adapter.py index 777dfbb3..c83bb1a8 100644 --- a/test/onair/data_handling/test_redis_adapter.py +++ b/test/onair/data_handling/test_redis_adapter.py @@ -17,10 +17,13 @@ import redis import threading + # __init__ tests -def test_redis_adapter_DataSource__init__sets_redis_values_then_connects_and_subscribes_to_subscriptions(mocker): +def test_redis_adapter_DataSource__init__sets_redis_values_then_connects_and_subscribes_to_subscriptions( + mocker, +): # Arrange - expected_address = 'localhost' + expected_address = "localhost" expected_port = 6379 expected_db = 0 expected_server = None @@ -35,39 +38,47 @@ def test_redis_adapter_DataSource__init__sets_redis_values_then_connects_and_sub cut = DataSource.__new__(DataSource) cut.subscriptions = expected_subscriptions fake_order = MagicMock() - fake_order.__len__.return_value = \ - pytest.gen.randint(1, 10) # from 1 to 10 arbitrary + fake_order.__len__.return_value = pytest.gen.randint( + 1, 10 + ) # from 1 to 10 arbitrary cut.order = fake_order - mocker.patch.object(OnAirDataSource, '__init__', new=MagicMock()) - mocker.patch('threading.Lock', return_value=fake_new_data_lock) - mocker.patch.object(cut, 'connect') - mocker.patch.object(cut, 'subscribe') + mocker.patch.object(OnAirDataSource, "__init__", new=MagicMock()) + mocker.patch("threading.Lock", return_value=fake_new_data_lock) + mocker.patch.object(cut, "connect") + mocker.patch.object(cut, "subscribe") # Act cut.__init__(arg_data_file, arg_meta_file, arg_ss_breakdown) # Assert assert OnAirDataSource.__init__.call_count == 1 - assert OnAirDataSource.__init__.call_args_list[0].args == 
(arg_data_file, arg_meta_file, arg_ss_breakdown) + assert OnAirDataSource.__init__.call_args_list[0].args == ( + arg_data_file, + arg_meta_file, + arg_ss_breakdown, + ) assert cut.address == expected_address assert cut.port == expected_port assert cut.db == expected_db assert cut.server == expected_server assert cut.new_data_lock == fake_new_data_lock assert cut.new_data == False - assert cut.currentData == [{'headers':fake_order, - 'data':list('-' * len(fake_order))}, - {'headers':fake_order, - 'data':list('-' * len(fake_order))}] + assert cut.currentData == [ + {"headers": fake_order, "data": list("-" * len(fake_order))}, + {"headers": fake_order, "data": list("-" * len(fake_order))}, + ] assert cut.double_buffer_read_index == 0 assert cut.connect.call_count == 1 assert cut.connect.call_args_list[0].args == () assert cut.subscribe.call_count == 1 - assert cut.subscribe.call_args_list[0].args == (expected_subscriptions, ) + assert cut.subscribe.call_args_list[0].args == (expected_subscriptions,) + # connect tests -def test_redis_adapter_DataSource_connect_establishes_server_with_initialized_attributes(mocker): +def test_redis_adapter_DataSource_connect_establishes_server_with_initialized_attributes( + mocker, +): # Arrange expected_address = MagicMock() expected_port = MagicMock() @@ -79,21 +90,28 @@ def test_redis_adapter_DataSource_connect_establishes_server_with_initialized_at cut.port = expected_port cut.db = expected_db - mocker.patch(redis_adapter.__name__ + '.print_msg') - mocker.patch('redis.Redis', return_value=fake_server) + mocker.patch(redis_adapter.__name__ + ".print_msg") + mocker.patch("redis.Redis", return_value=fake_server) # Act cut.connect() # Assert assert redis_adapter.print_msg.call_count == 2 - assert redis_adapter.print_msg.call_args_list[0].args == ('Redis adapter connecting to server...',) + assert redis_adapter.print_msg.call_args_list[0].args == ( + "Redis adapter connecting to server...", + ) assert redis.Redis.call_count == 1 - assert redis.Redis.call_args_list[0].args == (expected_address, expected_port, expected_db) + assert redis.Redis.call_args_list[0].args == ( + expected_address, + expected_port, + expected_db, + ) assert fake_server.ping.call_count == 1 - assert redis_adapter.print_msg.call_args_list[1].args == ('... connected!',) + assert redis_adapter.print_msg.call_args_list[1].args == ("... 
connected!",) assert cut.server == fake_server + def test_redis_adapter_DataSource_fails_to_connect_to_server(mocker): # Arrange expected_address = MagicMock() @@ -106,25 +124,34 @@ def test_redis_adapter_DataSource_fails_to_connect_to_server(mocker): cut.port = expected_port cut.db = expected_db - mocker.patch(redis_adapter.__name__ + '.print_msg') - mocker.patch('redis.Redis', return_value=fake_server) - mocker.patch.object(fake_server, 'ping', return_value=False) + mocker.patch(redis_adapter.__name__ + ".print_msg") + mocker.patch("redis.Redis", return_value=fake_server) + mocker.patch.object(fake_server, "ping", return_value=False) # Act cut.connect() # Assert assert redis_adapter.print_msg.call_count == 1 - assert redis_adapter.print_msg.call_args_list[0].args == ("Redis adapter connecting to server...",) + assert redis_adapter.print_msg.call_args_list[0].args == ( + "Redis adapter connecting to server...", + ) assert redis.Redis.call_count == 1 - assert redis.Redis.call_args_list[0].args == (expected_address, expected_port, expected_db) + assert redis.Redis.call_args_list[0].args == ( + expected_address, + expected_port, + expected_db, + ) assert fake_server.ping.call_count == 1 assert cut.server == fake_server + # subscribe_message tests -def test_redis_adapter_DataSource_subscribe_subscribes_to_each_given_subscription_and_starts_listening_when_server_available(mocker): +def test_redis_adapter_DataSource_subscribe_subscribes_to_each_given_subscription_and_starts_listening_when_server_available( + mocker, +): # Arrange - arg_subscriptions = [MagicMock()] * pytest.gen.randint(1, 10) # 1 to 10 arbitrary + arg_subscriptions = [MagicMock()] * pytest.gen.randint(1, 10) # 1 to 10 arbitrary fake_server = MagicMock() fake_pubsub = MagicMock() @@ -133,12 +160,12 @@ def test_redis_adapter_DataSource_subscribe_subscribes_to_each_given_subscriptio cut = DataSource.__new__(DataSource) cut.server = fake_server - mocker.patch.object(fake_server, 'ping', return_value=True) - mocker.patch.object(fake_server, 'pubsub', return_value=fake_pubsub) - mocker.patch.object(fake_pubsub, 'subscribe') - mocker.patch(redis_adapter.__name__ + '.print_msg') - mocker.patch('threading.Thread', return_value=fake_thread) - mocker.patch.object(fake_thread, 'start') + mocker.patch.object(fake_server, "ping", return_value=True) + mocker.patch.object(fake_server, "pubsub", return_value=fake_pubsub) + mocker.patch.object(fake_pubsub, "subscribe") + mocker.patch(redis_adapter.__name__ + ".print_msg") + mocker.patch("threading.Thread", return_value=fake_thread) + mocker.patch.object(fake_thread, "start") # Act cut.subscribe(arg_subscriptions) @@ -149,13 +176,20 @@ def test_redis_adapter_DataSource_subscribe_subscribes_to_each_given_subscriptio assert fake_pubsub.subscribe.call_count == len(arg_subscriptions) for i in range(len(arg_subscriptions)): assert fake_pubsub.subscribe.call_args_list[i].args == (arg_subscriptions[i],) - assert redis_adapter.print_msg.call_args_list[i].args == (f"Subscribing to channel: {arg_subscriptions[i]}",) + assert redis_adapter.print_msg.call_args_list[i].args == ( + f"Subscribing to channel: {arg_subscriptions[i]}", + ) assert threading.Thread.call_count == 1 - assert threading.Thread.call_args_list[0].kwargs == ({'target': cut.message_listener}) + assert threading.Thread.call_args_list[0].kwargs == ( + {"target": cut.message_listener} + ) assert fake_thread.start.call_count == 1 assert cut.pubsub == fake_pubsub -def 
test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_empty(mocker):
+
+def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_empty(
+    mocker,
+):
     # Arrange
     arg_subscriptions = []
     fake_server = MagicMock()
@@ -166,11 +200,11 @@ def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_e
     cut.server = fake_server
     cut.pubsub = initial_pubsub
 
-    mocker.patch.object(fake_server, 'ping', return_value=False)
-    mocker.patch(redis_adapter.__name__ + '.print_msg')
-    mocker.patch.object(fake_server, 'pubsub')
-    mocker.patch('threading.Thread')
-    mocker.patch.object(fake_thread, 'start')
+    mocker.patch.object(fake_server, "ping", return_value=False)
+    mocker.patch(redis_adapter.__name__ + ".print_msg")
+    mocker.patch.object(fake_server, "pubsub")
+    mocker.patch("threading.Thread")
+    mocker.patch.object(fake_thread, "start")
 
     # Act
     cut.subscribe(arg_subscriptions)
@@ -181,11 +215,16 @@ def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_e
     assert threading.Thread.call_count == 0
     assert fake_thread.start.call_count == 0
     assert cut.pubsub == initial_pubsub
-    assert redis_adapter.print_msg.call_args_list[0].args == ("No subscriptions given!",)
+    assert redis_adapter.print_msg.call_args_list[0].args == (
+        "No subscriptions given!",
+    )
+
 
 # Note: at runtime self.server.ping raises an error rather than returning False, so this code path never actually runs;
 # this unit test exists only for completeness of coverage
-def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_server_does_not_respond_to_ping(mocker):
+def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_server_does_not_respond_to_ping(
+    mocker,
+):
     # Arrange
     arg_channel = [MagicMock()]
     fake_server = MagicMock()
@@ -196,11 +235,11 @@ def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_s
     cut.server = fake_server
     cut.pubsub = initial_pubsub
 
-    mocker.patch.object(fake_server, 'ping', return_value=False)
-    mocker.patch(redis_adapter.__name__ + '.print_msg')
-    mocker.patch.object(fake_server, 'pubsub')
-    mocker.patch('threading.Thread')
-    mocker.patch.object(fake_thread, 'start')
+    mocker.patch.object(fake_server, "ping", return_value=False)
+    mocker.patch(redis_adapter.__name__ + ".print_msg")
+    mocker.patch.object(fake_server, "pubsub")
+    mocker.patch("threading.Thread")
+    mocker.patch.object(fake_thread, "start")
 
     # Act
     cut.subscribe(arg_channel)
@@ -211,7 +250,10 @@ def test_redis_adapter_DataSource_subscribe_states_no_subscriptions_given_when_s
     assert threading.Thread.call_count == 0
     assert fake_thread.start.call_count == 0
     assert cut.pubsub == initial_pubsub
-    assert redis_adapter.print_msg.call_args_list[0].args == ("No subscriptions given!",)
+    assert redis_adapter.print_msg.call_args_list[0].args == (
+        "No subscriptions given!",
+    )
+
 
 # get_next tests
 def test_redis_adapter_DataSource_get_next_returns_expected_data_when_new_data_is_true_and_double_buffer_read_index_is_0():
@@ -224,8 +266,8 @@ def test_redis_adapter_DataSource_get_next_returns_expected_data_when_new_data_i
     pre_call_index = cut.double_buffer_read_index
    expected_result = MagicMock()
     cut.currentData = []
-    cut.currentData.append({'data': MagicMock()})
-    cut.currentData.append({'data': expected_result})
+    cut.currentData.append({"data": MagicMock()})
+    cut.currentData.append({"data": expected_result})
 
     # Act
     result = cut.get_next()
@@ -235,6 +277,7 @@ 
 
assert cut.double_buffer_read_index == 1 assert result == expected_result + def test_redis_adapter_DataSource_get_next_returns_expected_data_when_new_data_is_true_and_double_buffer_read_index_is_1(): # Arrange # Renew DataSource to ensure test independence @@ -245,8 +288,8 @@ def test_redis_adapter_DataSource_get_next_returns_expected_data_when_new_data_i pre_call_index = cut.double_buffer_read_index expected_result = MagicMock() cut.currentData = [] - cut.currentData.append({'data': expected_result}) - cut.currentData.append({'data': MagicMock()}) + cut.currentData.append({"data": expected_result}) + cut.currentData.append({"data": MagicMock()}) # Act result = cut.get_next() @@ -256,11 +299,12 @@ def test_redis_adapter_DataSource_get_next_returns_expected_data_when_new_data_i assert cut.double_buffer_read_index == 0 assert result == expected_result + def test_redis_adapter_DataSource_get_next_when_called_multiple_times_when_new_data_is_true(): # Arrange # Renew DataSource to ensure test independence cut = DataSource.__new__(DataSource) - cut.double_buffer_read_index = pytest.gen.randint(0,1) + cut.double_buffer_read_index = pytest.gen.randint(0, 1) cut.new_data_lock = MagicMock() cut.currentData = [MagicMock(), MagicMock()] pre_call_index = cut.double_buffer_read_index @@ -268,14 +312,14 @@ def test_redis_adapter_DataSource_get_next_when_called_multiple_times_when_new_d # Act results = [] - num_calls = pytest.gen.randint(2,10) # arbitrary, 2 to 10 + num_calls = pytest.gen.randint(2, 10) # arbitrary, 2 to 10 for i in range(num_calls): cut.new_data = True fake_new_data = MagicMock() if cut.double_buffer_read_index == 0: - cut.currentData[1] = {'data': fake_new_data} + cut.currentData[1] = {"data": fake_new_data} else: - cut.currentData[0] = {'data': fake_new_data} + cut.currentData[0] = {"data": fake_new_data} expected_data.append(fake_new_data) results.append(cut.get_next()) @@ -285,29 +329,30 @@ def test_redis_adapter_DataSource_get_next_when_called_multiple_times_when_new_d results[i] = expected_data[i] assert cut.double_buffer_read_index == (num_calls + pre_call_index) % 2 + def test_redis_adapter_DataSource_get_next_waits_until_data_is_available(mocker): # Arrange # Renew DataSource to ensure test independence cut = DataSource.__new__(DataSource) cut.new_data_lock = MagicMock() - cut.double_buffer_read_index = pytest.gen.randint(0,1) + cut.double_buffer_read_index = pytest.gen.randint(0, 1) pre_call_index = cut.double_buffer_read_index expected_result = MagicMock() cut.new_data = None cut.currentData = [] if pre_call_index == 0: - cut.currentData.append({'data': MagicMock()}) - cut.currentData.append({'data': expected_result}) + cut.currentData.append({"data": MagicMock()}) + cut.currentData.append({"data": expected_result}) else: - cut.currentData.append({'data': expected_result}) - cut.currentData.append({'data': MagicMock()}) + cut.currentData.append({"data": expected_result}) + cut.currentData.append({"data": MagicMock()}) num_falses = pytest.gen.randint(1, 10) side_effect_list = [False] * num_falses side_effect_list.append(True) - mocker.patch.object(cut, 'has_data', side_effect=side_effect_list) - mocker.patch(redis_adapter.__name__ + '.time.sleep') + mocker.patch.object(cut, "has_data", side_effect=side_effect_list) + mocker.patch(redis_adapter.__name__ + ".time.sleep") # Act result = cut.get_next() @@ -325,22 +370,26 @@ def test_redis_adapter_DataSource_get_next_waits_until_data_is_available(mocker) assert result == expected_result + # has_more tests def 
test_redis_adapter_DataSource_has_more_always_returns_True(): cut = DataSource.__new__(DataSource) assert cut.has_more() == True + # message_listener tests -def test_redis_adapter_DataSource_message_listener_warns_of_exit_and_does_not_run_for_loop_when_listen_returns_StopIteration(mocker): +def test_redis_adapter_DataSource_message_listener_warns_of_exit_and_does_not_run_for_loop_when_listen_returns_StopIteration( + mocker, +): # Arrange cut = DataSource.__new__(DataSource) cut.pubsub = MagicMock(name="cut.pubsub") - fake_listener = MagicMock(name='fake_listener') + fake_listener = MagicMock(name="fake_listener") fake_listener.__next__.side_effect = StopIteration - mocker.patch.object(cut.pubsub, 'listen', side_effect=[fake_listener]) - mocker.patch(redis_adapter.__name__ + '.json.loads') - mocker.patch(redis_adapter.__name__ + '.print_msg') + mocker.patch.object(cut.pubsub, "listen", side_effect=[fake_listener]) + mocker.patch(redis_adapter.__name__ + ".json.loads") + mocker.patch(redis_adapter.__name__ + ".print_msg") # Act cut.message_listener() @@ -348,20 +397,32 @@ def test_redis_adapter_DataSource_message_listener_warns_of_exit_and_does_not_ru # Assert assert redis_adapter.json.loads.call_count == 0 assert redis_adapter.print_msg.call_count == 1 - assert redis_adapter.print_msg.call_args_list[0].args == ("Redis subscription listener exited.", ['WARNING']) + assert redis_adapter.print_msg.call_args_list[0].args == ( + "Redis subscription listener exited.", + ["WARNING"], + ) -def test_redis_adapter_DataSource_message_listener_prints_warning_when_receiving_non_message_type(mocker): + +def test_redis_adapter_DataSource_message_listener_prints_warning_when_receiving_non_message_type( + mocker, +): # Arrange cut = DataSource.__new__(DataSource) cut.pubsub = MagicMock() - ignored_message_types = ['subscribe', 'unsubscribe', 'psubscribe', 'punsubscribe', 'pmessage'] + ignored_message_types = [ + "subscribe", + "unsubscribe", + "psubscribe", + "punsubscribe", + "pmessage", + ] fake_message = {} - fake_message['type'] = pytest.gen.choice(ignored_message_types) - fake_message['channel'] = str(MagicMock(name='fake_message')).encode('utf-8') - mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message]) - mocker.patch(redis_adapter.__name__ + '.json.loads') - mocker.patch(redis_adapter.__name__ + '.print_msg') + fake_message["type"] = pytest.gen.choice(ignored_message_types) + fake_message["channel"] = str(MagicMock(name="fake_message")).encode("utf-8") + mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message]) + mocker.patch(redis_adapter.__name__ + ".json.loads") + mocker.patch(redis_adapter.__name__ + ".print_msg") # Act cut.message_listener() @@ -370,198 +431,251 @@ def test_redis_adapter_DataSource_message_listener_prints_warning_when_receiving assert redis_adapter.json.loads.call_count == 0 assert redis_adapter.print_msg.call_count == 2 assert redis_adapter.print_msg.call_args_list[0].args == ( - f"Redis adapter: channel '{fake_message['channel'].decode()}' received " \ - f"message type: {fake_message['type']}.", ['WARNING']) + f"Redis adapter: channel '{fake_message['channel'].decode()}' received " + f"message type: {fake_message['type']}.", + ["WARNING"], + ) assert redis_adapter.print_msg.call_args_list[1].args == ( - "Redis subscription listener exited.", ['WARNING']) + "Redis subscription listener exited.", + ["WARNING"], + ) + -def test_redis_adapter_DataSource_message_listener_prints_warning_when_data_not_json_format_and_does_not_update_frame(mocker): +def 
test_redis_adapter_DataSource_message_listener_prints_warning_when_data_not_json_format_and_does_not_update_frame( + mocker, +): # Arrange cut = DataSource.__new__(DataSource) cut.pubsub = MagicMock() fake_message = {} - fake_message['type'] = 'message' - fake_message['channel'] = str( - MagicMock(name='fake_message_channel')).encode('utf-8') - fake_message['data'] = str(MagicMock(name='fake_message_data')) - mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message]) - mocker.patch(redis_adapter.__name__ + '.json.loads', side_effect=ValueError) - mocker.patch(redis_adapter.__name__ + '.print_msg') + fake_message["type"] = "message" + fake_message["channel"] = str(MagicMock(name="fake_message_channel")).encode( + "utf-8" + ) + fake_message["data"] = str(MagicMock(name="fake_message_data")) + mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message]) + mocker.patch(redis_adapter.__name__ + ".json.loads", side_effect=ValueError) + mocker.patch(redis_adapter.__name__ + ".print_msg") # Act cut.message_listener() # Assert assert redis_adapter.json.loads.call_count == 1 - assert redis_adapter.json.loads.call_args_list[0].args == ( - fake_message['data'], ) + assert redis_adapter.json.loads.call_args_list[0].args == (fake_message["data"],) assert redis_adapter.print_msg.call_count == 2 assert redis_adapter.print_msg.call_args_list[0].args == ( - f'Subscribed channel `{fake_message["channel"].decode()}\' message ' \ - 'received but is not in json format.\nMessage:\n' \ - f'{fake_message["data"]}', ['WARNING']) + f'Subscribed channel `{fake_message["channel"].decode()}\' message ' + "received but is not in json format.\nMessage:\n" + f'{fake_message["data"]}', + ["WARNING"], + ) assert redis_adapter.print_msg.call_args_list[1].args == ( - "Redis subscription listener exited.", ['WARNING']) + "Redis subscription listener exited.", + ["WARNING"], + ) + -def test_redis_adapter_DataSource_message_listener_warns_user_when_processed_data_did_not_contain_time(mocker): +def test_redis_adapter_DataSource_message_listener_warns_user_when_processed_data_did_not_contain_time( + mocker, +): # Arrange cut = DataSource.__new__(DataSource) - cut.double_buffer_read_index = pytest.gen.choice([0 , 1]) - cut.currentData = {0: {'headers': [], 'data': []}, - 1: {'headers': [], 'data': []}} + cut.double_buffer_read_index = pytest.gen.choice([0, 1]) + cut.currentData = {0: {"headers": [], "data": []}, 1: {"headers": [], "data": []}} cut.pubsub = MagicMock() cut.new_data_lock = MagicMock() cut.new_data = False fake_message = {} - fake_message['type'] = 'message' - fake_message['channel'] = str( - MagicMock(name='fake_message_channel')).encode('utf-8') - fake_message['data'] = '{}' # empty_message - mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message]) - mocker.patch(redis_adapter.__name__ + '.json.loads', return_value={}) - mocker.patch(redis_adapter.__name__ + '.print_msg') + fake_message["type"] = "message" + fake_message["channel"] = str(MagicMock(name="fake_message_channel")).encode( + "utf-8" + ) + fake_message["data"] = "{}" # empty_message + mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message]) + mocker.patch(redis_adapter.__name__ + ".json.loads", return_value={}) + mocker.patch(redis_adapter.__name__ + ".print_msg") # Act cut.message_listener() # Assert assert redis_adapter.json.loads.call_count == 1 - assert redis_adapter.json.loads.call_args_list[0].args == ( - fake_message['data'], ) + assert redis_adapter.json.loads.call_args_list[0].args == 
(fake_message["data"],)
     assert redis_adapter.print_msg.call_count == 2
     assert redis_adapter.print_msg.call_args_list[0].args == (
-        f'Message from channel `{fake_message["channel"].decode()}\' ' \
-        f'did not contain `time\' key\nMessage:\n{fake_message["data"]}', \
-        ['WARNING'])
+        f'Message from channel `{fake_message["channel"].decode()}\' '
+        f'did not contain `time\' key\nMessage:\n{fake_message["data"]}',
+        ["WARNING"],
+    )
     assert redis_adapter.print_msg.call_args_list[1].args == (
-        "Redis subscription listener exited.", ['WARNING'])
+        "Redis subscription listener exited.",
+        ["WARNING"],
+    )
 
-def test_redis_adapter_DataSource_message_listener_warns_of_received_key_that_does_not_exist_in_header(mocker):
+
+def test_redis_adapter_DataSource_message_listener_warns_of_received_key_that_does_not_exist_in_header(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
-    cut.double_buffer_read_index = pytest.gen.choice([0 , 1])
-    cut.currentData = {0: {'headers': ['time'],
-                           'data': ['-']},
-                       1: {'headers': ['time'],
-                           'data': ['-']}}
+    cut.double_buffer_read_index = pytest.gen.choice([0, 1])
+    cut.currentData = {
+        0: {"headers": ["time"], "data": ["-"]},
+        1: {"headers": ["time"], "data": ["-"]},
+    }
     cut.pubsub = MagicMock()
     cut.new_data_lock = MagicMock()
     cut.new_data = False
     fake_message = {}
-    fake_message['type'] = 'message'
-    fake_message['channel'] = str(
-        MagicMock(name='fake_message_channel')).encode('utf-8')
-    fake_message['data'] = '{"time":0, "unknown_key":0}'
-    mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message])
-    mocker.patch(redis_adapter.__name__ + '.json.loads', return_value={"time":0, "unknown_key":0})
-    mocker.patch(redis_adapter.__name__ + '.print_msg')
+    fake_message["type"] = "message"
+    fake_message["channel"] = str(MagicMock(name="fake_message_channel")).encode(
+        "utf-8"
+    )
+    fake_message["data"] = '{"time":0, "unknown_key":0}'
+    mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message])
+    mocker.patch(
+        redis_adapter.__name__ + ".json.loads",
+        return_value={"time": 0, "unknown_key": 0},
+    )
+    mocker.patch(redis_adapter.__name__ + ".print_msg")
 
     # Act
     cut.message_listener()
 
     # Assert
     assert redis_adapter.json.loads.call_count == 1
-    assert redis_adapter.json.loads.call_args_list[0].args == (
-        fake_message['data'], )
+    assert redis_adapter.json.loads.call_args_list[0].args == (fake_message["data"],)
     assert redis_adapter.print_msg.call_count == 2
     assert redis_adapter.print_msg.call_args_list[0].args == (
-        f"Unused key `unknown_key' in message " \
-        f'from channel `{fake_message["channel"].decode()}.\'', ['WARNING'])
+        f"Unused key `unknown_key' in message "
+        f'from channel `{fake_message["channel"].decode()}.\'',
+        ["WARNING"],
+    )
     assert redis_adapter.print_msg.call_args_list[1].args == (
-        "Redis subscription listener exited.", ['WARNING'])
+        "Redis subscription listener exited.",
+        ["WARNING"],
+    )
 
-def test_redis_adapter_DataSource_message_listener_warns_of_expected_keys_that_do_not_appear_in_message(mocker):
+
+def test_redis_adapter_DataSource_message_listener_warns_of_expected_keys_that_do_not_appear_in_message(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
-    cut.double_buffer_read_index = pytest.gen.choice([0 , 1])
+    cut.double_buffer_read_index = pytest.gen.choice([0, 1])
     cut.pubsub = MagicMock()
     cut.new_data_lock = MagicMock()
     cut.new_data = False
     fake_message = {}
-    fake_message['type'] = 'message'
-    fake_message['channel'] = str(
-        MagicMock(name='fake_message_channel')).encode('utf-8')
-    cut.currentData = {0: {'headers': ['time',
-                                       f'{fake_message["channel"].decode()}' \
-                                       '.missing_key'],
-                           'data': ['-', '-']},
-                       1: {'headers': ['time',
-                                       f'{fake_message["channel"].decode()}' \
-                                       '.missing_key'],
-                           'data': ['-', '-']}}
-    fake_message['data'] = '{}'
-    mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message])
-    mocker.patch(redis_adapter.__name__ + '.json.loads', return_value={})
-    mocker.patch(redis_adapter.__name__ + '.print_msg')
+    fake_message["type"] = "message"
+    fake_message["channel"] = str(MagicMock(name="fake_message_channel")).encode(
+        "utf-8"
+    )
+    cut.currentData = {
+        0: {
+            "headers": ["time", f'{fake_message["channel"].decode()}' ".missing_key"],
+            "data": ["-", "-"],
+        },
+        1: {
+            "headers": ["time", f'{fake_message["channel"].decode()}' ".missing_key"],
+            "data": ["-", "-"],
+        },
+    }
+    fake_message["data"] = "{}"
+    mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message])
+    mocker.patch(redis_adapter.__name__ + ".json.loads", return_value={})
+    mocker.patch(redis_adapter.__name__ + ".print_msg")
 
     # Act
     cut.message_listener()
 
     # Assert
     assert redis_adapter.json.loads.call_count == 1
-    assert redis_adapter.json.loads.call_args_list[0].args == (
-        fake_message['data'], )
+    assert redis_adapter.json.loads.call_args_list[0].args == (fake_message["data"],)
     assert redis_adapter.print_msg.call_count == 3
     assert redis_adapter.print_msg.call_args_list[0].args == (
-        f'Message from channel `{fake_message["channel"].decode()}\' ' \
-        f'did not contain `{fake_message["channel"].decode()}.missing_key\'' \
-        f' key\nMessage:\n{fake_message["data"]}', \
-        ['WARNING'])
+        f'Message from channel `{fake_message["channel"].decode()}\' '
+        f'did not contain `{fake_message["channel"].decode()}.missing_key\''
+        f' key\nMessage:\n{fake_message["data"]}',
+        ["WARNING"],
+    )
     assert redis_adapter.print_msg.call_args_list[1].args == (
-        f'Message from channel `{fake_message["channel"].decode()}\' ' \
-        f'did not contain `time\' key\nMessage:\n{fake_message["data"]}', \
-        ['WARNING'])
+        f'Message from channel `{fake_message["channel"].decode()}\' '
+        f'did not contain `time\' key\nMessage:\n{fake_message["data"]}',
+        ["WARNING"],
+    )
     assert redis_adapter.print_msg.call_args_list[2].args == (
-        "Redis subscription listener exited.", ['WARNING'])
+        "Redis subscription listener exited.",
+        ["WARNING"],
+    )
 
-def test_redis_adapter_DataSource_message_listener_updates_new_data_with_received_data_by_channel_and_key_matched_to_frame_header(mocker):
+
+def test_redis_adapter_DataSource_message_listener_updates_new_data_with_received_data_by_channel_and_key_matched_to_frame_header(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
-    cut.double_buffer_read_index = pytest.gen.choice([0 , 1])
+    cut.double_buffer_read_index = pytest.gen.choice([0, 1])
     cut.pubsub = MagicMock()
     cut.new_data_lock = MagicMock()
     cut.new_data = False
     fake_message = {}
-    fake_message['type'] = 'message'
-    fake_message['channel'] = str(
-        MagicMock(name='fake_message_channel')).encode('utf-8')
-    cut.currentData = {0: {'headers': ['time',
-                                       f'{fake_message["channel"].decode()}' \
-                                       '.correct_key', 'fakeotherchannel.x'],
-                           'data': ['-', '-', '0']},
-                       1: {'headers': ['time',
-                                       f'{fake_message["channel"].decode()}' \
-                                       '.correct_key', 'fakeotherchannel.x'],
-                           'data': ['-', '-', '0']}}
-    fake_message['data'] = '{}'
-    mocker.patch.object(cut.pubsub, 'listen', return_value=[fake_message])
+    fake_message["type"] = "message"
+    fake_message["channel"] = str(MagicMock(name="fake_message_channel")).encode(
+        "utf-8"
+    )
+    cut.currentData = {
+        0: {
+            "headers": [
+                "time",
+                f'{fake_message["channel"].decode()}' ".correct_key",
+                "fakeotherchannel.x",
+            ],
+            "data": ["-", "-", "0"],
+        },
+        1: {
+            "headers": [
+                "time",
+                f'{fake_message["channel"].decode()}' ".correct_key",
+                "fakeotherchannel.x",
+            ],
+            "data": ["-", "-", "0"],
+        },
+    }
+    fake_message["data"] = "{}"
+    mocker.patch.object(cut.pubsub, "listen", return_value=[fake_message])
     fake_data = {
-        'time': pytest.gen.randint(1, 100), # from 1 to 100 arbitrary
-        'correct_key': pytest.gen.randint(1, 100), # from 1 to 100 arbitrary
+        "time": pytest.gen.randint(1, 100),  # from 1 to 100 arbitrary
+        "correct_key": pytest.gen.randint(1, 100),  # from 1 to 100 arbitrary
     }
-    mocker.patch(redis_adapter.__name__ + '.json.loads',
-                 return_value=fake_data)
-    mocker.patch(redis_adapter.__name__ + '.print_msg')
+    mocker.patch(redis_adapter.__name__ + ".json.loads", return_value=fake_data)
+    mocker.patch(redis_adapter.__name__ + ".print_msg")
 
     # Act
     cut.message_listener()
 
     # Assert
     assert redis_adapter.json.loads.call_count == 1
-    assert redis_adapter.json.loads.call_args_list[0].args == (
-        fake_message['data'], )
+    assert redis_adapter.json.loads.call_args_list[0].args == (fake_message["data"],)
     assert cut.new_data == True
     print(cut.currentData[cut.double_buffer_read_index])
-    assert cut.currentData[(cut.double_buffer_read_index + 1) % 2]['data'] == \
-        [fake_data['time'], fake_data['correct_key'], '-']
+    assert cut.currentData[(cut.double_buffer_read_index + 1) % 2]["data"] == [
+        fake_data["time"],
+        fake_data["correct_key"],
+        "-",
+    ]
     assert redis_adapter.print_msg.call_count == 1
     assert redis_adapter.print_msg.call_args_list[0].args == (
-        "Redis subscription listener exited.", ['WARNING'])
+        "Redis subscription listener exited.",
+        ["WARNING"],
+    )
+
 
 # has_data tests
 def test_redis_adapter_DataSource_has_data_returns_instance_new_data():
@@ -573,84 +687,127 @@ def test_redis_adapter_DataSource_has_data_returns_instance_new_data():
     assert result == expected_result
 
+
 # redis_adapter parse_meta_data tests
-def test_redis_adapter_DataSource_parse_meta_data_file_raises_ConfigKeyError_when_order_is_not_in_config_file(mocker):
+def test_redis_adapter_DataSource_parse_meta_data_file_raises_ConfigKeyError_when_order_is_not_in_config_file(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
     arg_configFile = MagicMock()
     arg_ss_breakdown = MagicMock()
 
     expected_extracted_configs = MagicMock()
-    expected_subscriptions = [MagicMock()] * pytest.gen.randint(0, 10) # 0 to 10 arbitrary
-    fake_meta = {'fake_other_stuff': MagicMock(),
-                 'redis_subscriptions':expected_subscriptions}
+    expected_subscriptions = [MagicMock()] * pytest.gen.randint(
+        0, 10
+    )  # 0 to 10 arbitrary
+    fake_meta = {
+        "fake_other_stuff": MagicMock(),
+        "redis_subscriptions": expected_subscriptions,
+    }
 
-    mocker.patch(redis_adapter.__name__ + '.extract_meta_data_handle_ss_breakdown', return_value=expected_extracted_configs)
-    mocker.patch(redis_adapter.__name__ + '.parseJson', return_value=fake_meta)
+    mocker.patch(
+        redis_adapter.__name__ + ".extract_meta_data_handle_ss_breakdown",
+        return_value=expected_extracted_configs,
+    )
+    mocker.patch(redis_adapter.__name__ + ".parseJson", return_value=fake_meta)
 
-    exception_message = (f'Config file: \'{arg_configFile}\' ' \
-                         'missing required key \'order\'')
+    exception_message = (
+        f"Config file: '{arg_configFile}' " "missing required key 'order'"
+    )
 
     # Act
     with pytest.raises(ConfigKeyError) as e_info:
-        cut.parse_meta_data_file(arg_configFile, arg_ss_breakdown, )
+        cut.parse_meta_data_file(
+            arg_configFile,
+            arg_ss_breakdown,
+        )
 
     # Assert
     assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_count == 1
-    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[0].args == (arg_configFile, arg_ss_breakdown)
+    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[
+        0
+    ].args == (arg_configFile, arg_ss_breakdown)
     assert redis_adapter.parseJson.call_count == 1
-    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile, )
+    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile,)
     assert e_info.match(exception_message)
 
-def test_redis_adapter_DataSource_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown_and_sets_subscriptions_when_redis_subscriptions_occupied(mocker):
+
+def test_redis_adapter_DataSource_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown_and_sets_subscriptions_when_redis_subscriptions_occupied(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
     arg_configFile = MagicMock()
     arg_ss_breakdown = MagicMock()
 
     expected_extracted_configs = MagicMock()
-    expected_subscriptions = [MagicMock()] * pytest.gen.randint(0, 10) # 0 to 10 arbitrary
-    fake_meta = {'fake_other_stuff': MagicMock(),
-                 'order': MagicMock(),
-                 'redis_subscriptions':expected_subscriptions}
+    expected_subscriptions = [MagicMock()] * pytest.gen.randint(
+        0, 10
+    )  # 0 to 10 arbitrary
+    fake_meta = {
+        "fake_other_stuff": MagicMock(),
+        "order": MagicMock(),
+        "redis_subscriptions": expected_subscriptions,
+    }
 
-    mocker.patch(redis_adapter.__name__ + '.extract_meta_data_handle_ss_breakdown', return_value=expected_extracted_configs)
-    mocker.patch(redis_adapter.__name__ + '.parseJson', return_value=fake_meta)
+    mocker.patch(
+        redis_adapter.__name__ + ".extract_meta_data_handle_ss_breakdown",
+        return_value=expected_extracted_configs,
+    )
+    mocker.patch(redis_adapter.__name__ + ".parseJson", return_value=fake_meta)
 
     # Act
-    result = cut.parse_meta_data_file(arg_configFile, arg_ss_breakdown, )
+    result = cut.parse_meta_data_file(
+        arg_configFile,
+        arg_ss_breakdown,
+    )
 
     # Assert
     assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_count == 1
-    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[0].args == (arg_configFile, arg_ss_breakdown)
+    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[
+        0
+    ].args == (arg_configFile, arg_ss_breakdown)
     assert redis_adapter.parseJson.call_count == 1
-    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile, )
+    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile,)
     assert cut.subscriptions == expected_subscriptions
     assert result == expected_extracted_configs
 
-def test_redis_adapter_DataSource_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown_and_sets_subscriptions_to_empty_when_none_given(mocker):
+
+def test_redis_adapter_DataSource_parse_meta_data_file_returns_call_to_extract_meta_data_handle_ss_breakdown_and_sets_subscriptions_to_empty_when_none_given(
+    mocker,
+):
     # Arrange
     cut = DataSource.__new__(DataSource)
     arg_configFile = MagicMock()
     arg_ss_breakdown = MagicMock()
 
-    fake_configs = {'fake_other_stuff': MagicMock()}
-    fake_meta = {'order': MagicMock()}
+    fake_configs = {"fake_other_stuff": MagicMock()}
+    fake_meta = {"order": MagicMock()}
 
-    mocker.patch(redis_adapter.__name__ + '.extract_meta_data_handle_ss_breakdown', return_value=fake_configs)
-    mocker.patch(redis_adapter.__name__ + '.parseJson', return_value=fake_meta)
+    mocker.patch(
+        redis_adapter.__name__ + ".extract_meta_data_handle_ss_breakdown",
+        return_value=fake_configs,
+    )
+    mocker.patch(redis_adapter.__name__ + ".parseJson", return_value=fake_meta)
 
     # Act
-    result = cut.parse_meta_data_file(arg_configFile, arg_ss_breakdown, )
+    result = cut.parse_meta_data_file(
+        arg_configFile,
+        arg_ss_breakdown,
+    )
 
     # Assert
     assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_count == 1
-    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[0].args == (arg_configFile, arg_ss_breakdown)
+    assert redis_adapter.extract_meta_data_handle_ss_breakdown.call_args_list[
+        0
+    ].args == (arg_configFile, arg_ss_breakdown)
     assert redis_adapter.parseJson.call_count == 1
-    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile, )
+    assert redis_adapter.parseJson.call_args_list[0].args == (arg_configFile,)
     assert cut.subscriptions == []
     assert result == fake_configs
 
+
 # redis_adapter get_vehicle_metadata tests
 def test_redis_adapter_DataSource_get_vehicle_metadata_returns_list_of_headers_and_list_of_test_assignments():
     # Arrange
@@ -658,7 +815,7 @@ def test_redis_adapter_DataSource_get_vehicle_metadata_returns_list_of_headers_a
     fake_all_headers = MagicMock()
     fake_test_assignments = MagicMock()
     fake_binning_configs = {}
-    fake_binning_configs['test_assignments'] = fake_test_assignments
+    fake_binning_configs["test_assignments"] = fake_test_assignments
 
     expected_result = (fake_all_headers, fake_test_assignments)
 
@@ -671,6 +828,7 @@ def test_redis_adapter_DataSource_get_vehicle_metadata_returns_list_of_headers_a
     # Assert
     assert result == expected_result
 
+
 # redis_adapter process_data_file tests
 def test_redis_adapter_DataSource_process_data_file_does_nothing():
     # Arrange
diff --git a/test/onair/data_handling/test_tlm_json_parser.py b/test/onair/data_handling/test_tlm_json_parser.py
index 4534c945..c61cedc3 100644
--- a/test/onair/data_handling/test_tlm_json_parser.py
+++ b/test/onair/data_handling/test_tlm_json_parser.py
@@ -13,34 +13,43 @@
 import onair.data_handling.tlm_json_parser as tlm_json_parser
 
+
 # parseTlmConfJson tests
-def test_tlm_json_parser_parseTlmConfJson_returns_configs_with_empty_dicts_when_reorg_dict_is_empty(mocker):
+def test_tlm_json_parser_parseTlmConfJson_returns_configs_with_empty_dicts_when_reorg_dict_is_empty(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
     fake_data = MagicMock()
     fake_organized_data = {}
 
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = []
-    expected_result['test_assignments'] = []
-    expected_result['description_assignments'] = []
-    expected_result['data_labels'] = []
+    expected_result["subsystem_assignments"] = []
+    expected_result["test_assignments"] = []
+    expected_result["description_assignments"] = []
+    expected_result["data_labels"] = []
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_order_key_does_not_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_order_key_does_not_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
@@ -51,30 +60,38 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
     fake_mnemonics = MagicMock()
     fake_description = MagicMock()
     fake_organized_data = {}
-    fake_organized_data[fake_label] = {'subsystem' : fake_subsystem,
-                                       'tests' : {fake_mnemonics : fake_limits},
-                                       'description' : fake_description}
-
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+    fake_organized_data[fake_label] = {
+        "subsystem": fake_subsystem,
+        "tests": {fake_mnemonics: fake_limits},
+        "description": fake_description,
+    }
+
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = [fake_subsystem]
-    expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]]
-    expected_result['description_assignments'] = [fake_description]
-    expected_result['data_labels'] = [fake_label]
+    expected_result["subsystem_assignments"] = [fake_subsystem]
+    expected_result["test_assignments"] = [[[fake_mnemonics, fake_limits]]]
+    expected_result["description_assignments"] = [fake_description]
+    expected_result["data_labels"] = [fake_label]
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_limits_test_and_description_keys_do_not_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_limits_test_and_description_keys_do_not_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
@@ -82,28 +99,34 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
     fake_label = MagicMock()
     fake_subsystem = MagicMock()
     fake_organized_data = {}
-    fake_organized_data[fake_label] = {'subsystem' : fake_subsystem}
+    fake_organized_data[fake_label] = {"subsystem": fake_subsystem}
 
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = [fake_subsystem]
-    expected_result['test_assignments'] = [[['NOOP']]]
-    expected_result['description_assignments'] = [['No description']]
-    expected_result['data_labels'] = [fake_label]
+    expected_result["subsystem_assignments"] = [fake_subsystem]
+    expected_result["test_assignments"] = [[["NOOP"]]]
+    expected_result["description_assignments"] = [["No description"]]
+    expected_result["data_labels"] = [fake_label]
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_multiple_labels_and_limits_test_and_description_keys_do_not_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_multiple_labels_and_limits_test_and_description_keys_do_not_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
@@ -111,73 +134,87 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
     fake_organized_data = {}
     fake_subsystems = []
     fake_labels = []
-    num_labels = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
+    num_labels = pytest.gen.randint(2, 10)  # arbitrary, from 2 to 10
     for i in range(num_labels):
         fake_label = MagicMock()
         fake_subsystem = MagicMock()
         fake_subsystems.append(fake_subsystem)
         fake_labels.append(fake_label)
-        fake_organized_data[fake_label] = {'subsystem' : fake_subsystem}
+        fake_organized_data[fake_label] = {"subsystem": fake_subsystem}
 
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = fake_subsystems
-    expected_result['test_assignments'] = [[['NOOP']]] * num_labels
-    expected_result['description_assignments'] = [['No description']] * num_labels
-    expected_result['data_labels'] = fake_labels
+    expected_result["subsystem_assignments"] = fake_subsystems
+    expected_result["test_assignments"] = [[["NOOP"]]] * num_labels
+    expected_result["description_assignments"] = [["No description"]] * num_labels
+    expected_result["data_labels"] = fake_labels
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_order_key_does_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_only_one_label_and_order_key_does_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
     fake_label = MagicMock()
-    fake_data = {'order' : [fake_label]}
+    fake_data = {"order": [fake_label]}
     fake_subsystem = MagicMock()
     fake_limits = MagicMock()
     fake_mnemonics = MagicMock()
     fake_description = MagicMock()
     fake_organized_data = {}
-    fake_organized_data[fake_label] = {'subsystem' : fake_subsystem,
-                                       'tests' : {fake_mnemonics : fake_limits},
-                                       'description' : fake_description}
-
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+    fake_organized_data[fake_label] = {
+        "subsystem": fake_subsystem,
+        "tests": {fake_mnemonics: fake_limits},
+        "description": fake_description,
+    }
+
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = [fake_subsystem]
-    expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]]
-    expected_result['description_assignments'] = [fake_description]
-    expected_result['data_labels'] = [fake_label]
+    expected_result["subsystem_assignments"] = [fake_subsystem]
+    expected_result["test_assignments"] = [[[fake_mnemonics, fake_limits]]]
+    expected_result["description_assignments"] = [fake_description]
+    expected_result["data_labels"] = [fake_label]
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_order_key_does_not_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_order_key_does_not_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
     fake_data = MagicMock()
-    num_elems = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
+    num_elems = pytest.gen.randint(2, 10)  # arbitrary, from 2 to 10
     fake_labels = [MagicMock() for i in range(num_elems)]
     fake_subsystem = MagicMock()
     fake_limits = MagicMock()
@@ -185,34 +222,42 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
     fake_description = MagicMock()
     fake_organized_data = {}
     for label in fake_labels:
-        fake_organized_data[label] = {'subsystem' : fake_subsystem,
-                                      'tests' : {fake_mnemonics : fake_limits},
-                                      'description' : fake_description}
-
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+        fake_organized_data[label] = {
+            "subsystem": fake_subsystem,
+            "tests": {fake_mnemonics: fake_limits},
+            "description": fake_description,
+        }
+
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = [fake_subsystem] * num_elems
-    expected_result['test_assignments'] = [[[fake_mnemonics, fake_limits]]] * num_elems
-    expected_result['description_assignments'] = [fake_description] * num_elems
-    expected_result['data_labels'] = fake_labels
+    expected_result["subsystem_assignments"] = [fake_subsystem] * num_elems
+    expected_result["test_assignments"] = [[[fake_mnemonics, fake_limits]]] * num_elems
+    expected_result["description_assignments"] = [fake_description] * num_elems
+    expected_result["data_labels"] = fake_labels
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_order_key_does_exist(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_order_key_does_exist(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
-    num_elems = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
+    num_elems = pytest.gen.randint(2, 10)  # arbitrary, from 2 to 10
     fake_label = []
     fake_subsystem = []
     fake_limits = []
@@ -226,7 +271,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
         fake_description.append(MagicMock())
     fake_order = fake_label.copy()
     pytest.gen.shuffle(fake_order)
-    fake_data = {'order' : fake_order}
+    fake_data = {"order": fake_order}
 
     desired_order = {}
     for i in range(num_elems):
@@ -244,38 +289,48 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
 
     fake_organized_data = {}
     for i in range(num_elems):
-        fake_organized_data[fake_label[i]] = {'subsystem' : fake_subsystem[i],
-                                              'tests' : {fake_mnemonics[i] : fake_limits[i]},
-                                              'description' : fake_description[i]}
-
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+        fake_organized_data[fake_label[i]] = {
+            "subsystem": fake_subsystem[i],
+            "tests": {fake_mnemonics[i]: fake_limits[i]},
+            "description": fake_description[i],
+        }
+
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = []
-    expected_result['test_assignments'] = []
-    expected_result['description_assignments'] = []
-    expected_result['data_labels'] = ordered_labels
+    expected_result["subsystem_assignments"] = []
+    expected_result["test_assignments"] = []
+    expected_result["description_assignments"] = []
+    expected_result["data_labels"] = ordered_labels
     for i in range(num_elems):
-        expected_result['subsystem_assignments'].append(ordered_subsys[i])
-        expected_result['test_assignments'].append([[ordered_mnemonics[i], ordered_limits[i]]])
-        expected_result['description_assignments'].append(ordered_descs[i])
+        expected_result["subsystem_assignments"].append(ordered_subsys[i])
+        expected_result["test_assignments"].append(
+            [[ordered_mnemonics[i], ordered_limits[i]]]
+        )
+        expected_result["description_assignments"].append(ordered_descs[i])
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
-def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_limits_are_interpreted_as_empty_lists(mocker):
+
+def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reorg_dict_contains_more_than_one_label_and_limits_are_interpreted_as_empty_lists(
+    mocker,
+):
     # Arrange
     arg_file_path = MagicMock()
 
-    num_elems = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10
+    num_elems = pytest.gen.randint(2, 10)  # arbitrary, from 2 to 10
     fake_label = []
     fake_subsystem = []
     fake_limits = []
@@ -289,7 +344,7 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
         fake_description.append(MagicMock())
     fake_order = fake_label.copy()
     pytest.gen.shuffle(fake_order)
-    fake_data = {'order' : fake_order}
+    fake_data = {"order": fake_order}
 
     desired_order = {}
     for i in range(num_elems):
@@ -307,50 +362,59 @@ def test_tlm_json_parser_parseTlmConfJson_returns_expected_configs_dict_when_reo
 
     fake_organized_data = {}
     for i in range(num_elems):
-        fake_organized_data[fake_label[i]] = {'subsystem' : fake_subsystem[i],
-                                              'tests' : {fake_mnemonics[i] : fake_limits[i]},
-                                              'description' : fake_description[i]}
-
-    mocker.patch(tlm_json_parser.__name__ + '.parseJson', return_value=fake_data)
-    mocker.patch(tlm_json_parser.__name__ + '.reorganizeTlmDict', return_value=fake_organized_data)
+        fake_organized_data[fake_label[i]] = {
+            "subsystem": fake_subsystem[i],
+            "tests": {fake_mnemonics[i]: fake_limits[i]},
+            "description": fake_description[i],
+        }
+
+    mocker.patch(tlm_json_parser.__name__ + ".parseJson", return_value=fake_data)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".reorganizeTlmDict",
+        return_value=fake_organized_data,
+    )
 
     expected_result = {}
-    expected_result['subsystem_assignments'] = []
-    expected_result['test_assignments'] = []
-    expected_result['description_assignments'] = []
-    expected_result['data_labels'] = ordered_labels
+    expected_result["subsystem_assignments"] = []
+    expected_result["test_assignments"] = []
+    expected_result["description_assignments"] = []
+    expected_result["data_labels"] = ordered_labels
     for i in range(num_elems):
-        expected_result['subsystem_assignments'].append(ordered_subsys[i])
-        expected_result['test_assignments'].append([[ordered_mnemonics[i], ordered_limits[i]]])
-        expected_result['description_assignments'].append(ordered_descs[i])
+        expected_result["subsystem_assignments"].append(ordered_subsys[i])
+        expected_result["test_assignments"].append(
+            [[ordered_mnemonics[i], ordered_limits[i]]]
+        )
+        expected_result["description_assignments"].append(ordered_descs[i])
 
     # Act
     result = tlm_json_parser.parseTlmConfJson(arg_file_path)
 
     # Assert
     assert tlm_json_parser.parseJson.call_count == 1
-    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path, )
+    assert tlm_json_parser.parseJson.call_args_list[0].args == (arg_file_path,)
     assert tlm_json_parser.reorganizeTlmDict.call_count == 1
-    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data, )
+    assert tlm_json_parser.reorganizeTlmDict.call_args_list[0].args == (fake_data,)
     assert result == expected_result
 
+
 # reorganizeTlmDict tests
 def test_tlm_json_parser_reorganizeTlmDict_raises_error_when_arg_data_does_not_contain_subsystems_key():
     # Arrange
-    arg_data_len = pytest.gen.randint(0, 10) # arbitrary, from 0 to 10
+    arg_data_len = pytest.gen.randint(0, 10)  # arbitrary, from 0 to 10
     arg_data = {}
-    [arg_data.update({MagicMock() : MagicMock()}) for i in range(arg_data_len)]
+    [arg_data.update({MagicMock(): MagicMock()}) for i in range(arg_data_len)]
 
     # Assert
     with pytest.raises(KeyError) as e_info:
         result = tlm_json_parser.reorganizeTlmDict(arg_data)
 
     # Act
-    assert e_info.match('subsystems')
+    assert e_info.match("subsystems")
+
 
 def test_tlm_json_parser_reorganizeTlmDict_returns_empty_dict_when_arg_data_subsystems_exists_and_is_empty():
     # Arrange
-    arg_data = {'subsystems' : {}}
+    arg_data = {"subsystems": {}}
 
     # Assert
     result = tlm_json_parser.reorganizeTlmDict(arg_data)
@@ -358,15 +422,16 @@ def test_tlm_json_parser_reorganizeTlmDict_returns_empty_dict_when_arg_data_subs
     # Act
     assert result == {}
 
+
 def test_tlm_json_parser_reorganizeTlmDict_returns_empty_dict_when_arg_data_subsystems_exists_and_all_keys_map_to_empty():
     # Arrange
-    arg_data = {'subsystems' : {}}
+    arg_data = {"subsystems": {}}
 
-    num_fake_subsystems = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10
+    num_fake_subsystems = pytest.gen.randint(1, 10)  # arbitrary, from 1 to 10
     fake_subsystems = [MagicMock() for i in range(num_fake_subsystems)]
 
     for fs in fake_subsystems:
-        arg_data['subsystems'][fs] = {}
+        arg_data["subsystems"][fs] = {}
 
     # Assert
     result = tlm_json_parser.reorganizeTlmDict(arg_data)
@@ -374,24 +439,29 @@ def test_tlm_json_parser_reorganizeTlmDict_returns_empty_dict_when_arg_data_subs
     # Act
     assert result == {}
 
+
 def test_tlm_json_parser_reorganizeTlmDict_returns_expected_dict_when_arg_data_subsystems_exists_and_is_not_empty():
     # Arrange
-    arg_data = {'subsystems' : {}}
+    arg_data = {"subsystems": {}}
 
-    num_fake_subsystems = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10, 0 has own test
+    num_fake_subsystems = pytest.gen.randint(
+        1, 10
+    )  # arbitrary, from 1 to 10, 0 has own test
    fake_subsystems = [MagicMock() for i in range(num_fake_subsystems)]
 
     expected_result = {}
     for fs in fake_subsystems:
-        arg_data['subsystems'][fs] = {}
-        num_fake_labels = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10, 0 has own test
+        arg_data["subsystems"][fs] = {}
+        num_fake_labels = pytest.gen.randint(
+            1, 10
+        )  # arbitrary, from 1 to 10, 0 has own test
         for i in range(num_fake_labels):
             fake_label = i
             fake_label_value = MagicMock()
-            arg_data['subsystems'][fs][fake_label] = fake_label_value
+            arg_data["subsystems"][fs][fake_label] = fake_label_value
             expected_result[fake_label] = fake_label_value
-            expected_result[fake_label]['subsystem'] = fs
+            expected_result[fake_label]["subsystem"] = fs
 
     # Assert
     result = tlm_json_parser.reorganizeTlmDict(arg_data)
@@ -399,42 +469,55 @@ def test_tlm_json_parser_reorganizeTlmDict_returns_expected_dict_when_arg_data_s
     # Act
     assert result == expected_result
 
+
 # str2lst tests
-def test_tlm_json_parser_str2lst_returns_call_to_ast_literal_eval_which_receive_given_string(mocker):
+def test_tlm_json_parser_str2lst_returns_call_to_ast_literal_eval_which_receive_given_string(
+    mocker,
+):
     # Arrange
     arg_string = str(MagicMock())
 
     expected_result = MagicMock()
 
-    mocker.patch(tlm_json_parser.__name__ + '.ast.literal_eval', return_value=expected_result)
+    mocker.patch(
+        tlm_json_parser.__name__ + ".ast.literal_eval", return_value=expected_result
+    )
 
     # Act
     result = tlm_json_parser.str2lst(arg_string)
 
     # Assert
     assert tlm_json_parser.ast.literal_eval.call_count == 1
-    assert tlm_json_parser.ast.literal_eval.call_args_list[0].args == (arg_string, )
+    assert tlm_json_parser.ast.literal_eval.call_args_list[0].args == (arg_string,)
     assert result == expected_result
 
-def test_tlm_json_parser_str2lst_prints_message_when_ast_literal_eval_receives_given_string_but_raises_exception(mocker):
+
+def test_tlm_json_parser_str2lst_prints_message_when_ast_literal_eval_receives_given_string_but_raises_exception(
+    mocker,
+):
     # Arrange
     arg_string = str(MagicMock())
 
-    mocker.patch(tlm_json_parser.__name__ + '.ast.literal_eval', side_effect=Exception)
-    mocker.patch(tlm_json_parser.__name__ + '.print')
+    mocker.patch(tlm_json_parser.__name__ + ".ast.literal_eval", side_effect=Exception)
+    mocker.patch(tlm_json_parser.__name__ + ".print")
 
     # Act
     result = tlm_json_parser.str2lst(arg_string)
 
     # Assert
     assert tlm_json_parser.ast.literal_eval.call_count == 1
-    assert tlm_json_parser.ast.literal_eval.call_args_list[0].args == (arg_string, )
+    assert tlm_json_parser.ast.literal_eval.call_args_list[0].args == (arg_string,)
     assert tlm_json_parser.print.call_count == 1
-    assert tlm_json_parser.print.call_args_list[0].args == ("Unable to process string representation of list", )
+    assert tlm_json_parser.print.call_args_list[0].args == (
+        "Unable to process string representation of list",
+    )
     assert result == None
 
+
 # parseJson tests
-def test_tlm_json_parser_parseJson_opens_given_path_and_returns_data_returned_by_json(mocker):
+def test_tlm_json_parser_parseJson_opens_given_path_and_returns_data_returned_by_json(
+    mocker,
+):
     # Arrange
     arg_path = MagicMock()
 
@@ -442,19 +525,19 @@ def test_tlm_json_parser_parseJson_opens_given_path_and_returns_data_returned_by
     fake_file_str = MagicMock()
     fake_file_data = MagicMock()
 
-    mocker.patch(tlm_json_parser.__name__ + '.open', return_value=fake_file)
-    mocker.patch.object(fake_file, 'read', return_value=fake_file_str)
-    mocker.patch(tlm_json_parser.__name__ + '.json.loads', return_value=fake_file_data)
-    mocker.patch.object(fake_file, 'close')
+    mocker.patch(tlm_json_parser.__name__ + ".open", return_value=fake_file)
+    mocker.patch.object(fake_file, "read", return_value=fake_file_str)
+    mocker.patch(tlm_json_parser.__name__ + ".json.loads", return_value=fake_file_data)
+    mocker.patch.object(fake_file, "close")
 
     # Act
     result = tlm_json_parser.parseJson(arg_path)
 
     # Assert
     assert tlm_json_parser.open.call_count == 1
-    assert tlm_json_parser.open.call_args_list[0].args == (arg_path, 'rb')
+    assert tlm_json_parser.open.call_args_list[0].args == (arg_path, "rb")
     assert fake_file.read.call_count == 1
     assert tlm_json_parser.json.loads.call_count == 1
-    assert tlm_json_parser.json.loads.call_args_list[0].args == (fake_file_str, )
+    assert tlm_json_parser.json.loads.call_args_list[0].args == (fake_file_str,)
     assert fake_file.close.call_count == 1
     assert result == fake_file_data
diff --git a/test/onair/src/ai_components/ai_plugin_abstract/test_AI_plugin_core.py b/test/onair/src/ai_components/ai_plugin_abstract/test_AI_plugin_core.py
index af529235..42b2c73a 100644
--- a/test/onair/src/ai_components/ai_plugin_abstract/test_AI_plugin_core.py
+++ b/test/onair/src/ai_components/ai_plugin_abstract/test_AI_plugin_core.py
@@ -14,6 +14,7 @@
 import onair.src.ai_components.ai_plugin_abstract.ai_plugin as ai_plugin
 from onair.src.ai_components.ai_plugin_abstract.ai_plugin import AIPlugin
 
+
 class FakeAIPlugin(AIPlugin):
     def __init__(self, _name, _headers):
         return super().__init__(_name, _headers)
@@ -24,10 +25,12 @@ def update(self):
     def render_reasoning(self):
         return dict()
 
+
 class IncompleteFakeAIPlugin(AIPlugin):
     def __init__(self, _name, _headers):
         return super().__init__(_name, _headers)
 
+
 class BadFakeAIPlugin(AIPlugin):
     def __init__(self, _name, _headers):
         return super().__init__(_name, _headers)
@@ -38,6 +41,7 @@ def update(self):
     def render_reasoning(self):
         return super().render_reasoning()
 
+
 # abstract methods tests
 def test_AIPlugin_raises_error_because_of_unimplemented_abstract_methods():
     # Arrange - None
@@ -50,6 +54,7 @@ def test_AIPlugin_raises_error_because_of_unimplemented_abstract_methods():
     assert "update" in e_info.__str__()
     assert "render_reasoning" in e_info.__str__()
 
+
 # Incomplete plugin call tests
 def test_AIPlugin_raises_error_when_an_inherited_class_is_instantiated_because_abstract_methods_are_not_implemented_by_that_class():
     # Arrange - None
@@ -58,10 +63,14 @@ def test_AIPlugin_raises_error_when_an_inherited_class_is_instantiated_because_a
         cut = IncompleteFakeAIPlugin.__new__(IncompleteFakeAIPlugin)
 
     # Assert
-    assert "Can't instantiate abstract class IncompleteFakeAIPlugin with" in e_info.__str__()
+    assert (
+        "Can't instantiate abstract class IncompleteFakeAIPlugin with"
+        in e_info.__str__()
+    )
     assert "update" in e_info.__str__()
     assert "render_reasoning" in e_info.__str__()
 
+
 def test_AIPlugin_raises_error_when_an_inherited_class_calls_abstract_methods_in_parent():
     # Act
     cut = BadFakeAIPlugin.__new__(BadFakeAIPlugin)
@@ -73,6 +82,7 @@ def test_AIPlugin_raises_error_when_an_inherited_class_calls_abstract_methods_in
             fnc()
     assert "NotImplementedError" in e_info.__str__()
 
+
 # Complete plugin call tests
 def test_AIPlugin_does_not_raise_error_when_an_inherited_class_is_instantiated_because_abstract_methods_are_implemented_by_that_class():
     # Arrange
@@ -85,8 +95,10 @@ def test_AIPlugin_does_not_raise_error_when_an_inherited_class_is_instantiated_b
     # Assert
     assert exception_raised == False
 
+
 # Complete plugin call tests
 
+
 # __init__ tests
 def test_AIPlugin__init__raises_assertion_error_when_given__headers_len_is_not_greater_than_0():
     # Arrange
@@ -100,16 +112,21 @@ def test_AIPlugin__init__raises_assertion_error_when_given__headers_len_is_not_g
         cut.__init__(arg__name, arg__headers)
 
     # Assert
-    assert e_info.match('')
+    assert e_info.match("")
+
 
-def test_AIPlugin__init__sets_instance_values_to_given_args_when_given__headers_len_is_greater_than_0(mocker):
+def test_AIPlugin__init__sets_instance_values_to_given_args_when_given__headers_len_is_greater_than_0(
+    mocker,
+):
     # Arrange
     arg__name = MagicMock()
     arg__headers = MagicMock()
 
     cut = FakeAIPlugin.__new__(FakeAIPlugin)
 
-    mocker.patch(ai_plugin.__name__ + '.len', return_value=pytest.gen.randint(1, 200)) # arbitrary, from 1 to 200 (but > 0)
+    mocker.patch(
+        ai_plugin.__name__ + ".len", return_value=pytest.gen.randint(1, 200)
+    )  # arbitrary, from 1 to 200 (but > 0)
     # Act
     cut.__init__(arg__name, arg__headers)
 
diff --git a/test/onair/src/ai_components/test_learners_interface.py b/test/onair/src/ai_components/test_learners_interface.py
index e22abf50..e0c68532 100644
--- a/test/onair/src/ai_components/test_learners_interface.py
+++ b/test/onair/src/ai_components/test_learners_interface.py
@@ -14,6 +14,7 @@
 import onair.src.ai_components.learners_interface as learners_interface
 from onair.src.ai_components.learners_interface import LearnersInterface
 
+
 # __init__ tests
 def test_LearnersInterface__init__raises_AssertionError_when_given_headers_len_is_0():
     # Arrange
@@ -28,9 +29,12 @@ def test_LearnersInterface__init__raises_AssertionError_when_given_headers_len_i
         cut.__init__(arg_headers)
 
     # Assert
-    assert e_info.match('Headers are required')
+    assert e_info.match("Headers are required")
+
 
-def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_self_learner_constructs_to_return_value_of_import_plugins(mocker):
+def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_self_learner_constructs_to_return_value_of_import_plugins(
+    mocker,
+):
     # Arrange
     arg_headers = MagicMock()
     arg__learner_plugins = MagicMock()
@@ -39,8 +43,10 @@ def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_se
 
     forced_return_learner_constructs = MagicMock()
 
-    mocker.patch(learners_interface.__name__ + '.import_plugins', return_value=forced_return_learner_constructs)
-
+    mocker.patch(
+        learners_interface.__name__ + ".import_plugins",
+        return_value=forced_return_learner_constructs,
+    )
 
     cut = LearnersInterface.__new__(LearnersInterface)
 
@@ -50,9 +56,13 @@ def test_LearnersInterface__init__sets_self_headers_to_given_headers_and_sets_se
     # Assert
     assert cut.headers == arg_headers
     assert learners_interface.import_plugins.call_count == 1
-    assert learners_interface.import_plugins.call_args_list[0].args == (arg_headers, arg__learner_plugins)
+    assert learners_interface.import_plugins.call_args_list[0].args == (
+        arg_headers,
+        arg__learner_plugins,
+    )
     assert cut.learner_constructs == forced_return_learner_constructs
 
+
 # update tests
 def test_LearnersInterface_update_does_nothing_when_instance_learner_constructs_is_empty():
     # Arrange
@@ -68,7 +78,10 @@ def test_LearnersInterface_update_does_nothing_when_instance_learner_constructs_
 
     # Assert
     assert result == None
 
-def test_LearnersInterface_update_calls_update_with_given_low_level_and_high_level_data_on_each_learner_constructs_item(mocker):
+
+def test_LearnersInterface_update_calls_update_with_given_low_level_and_high_level_data_on_each_learner_constructs_item(
+    mocker,
+):
     # Arrange
     arg_low_level_data = MagicMock()
     arg_high_level_data = MagicMock()
@@ -76,7 +89,9 @@ def test_LearnersInterface_update_calls_update_with_given_low_level_and_high_lev
     cut = LearnersInterface.__new__(LearnersInterface)
     cut.learner_constructs = []
 
-    num_fake_learner_constructs = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 has own test)
+    num_fake_learner_constructs = pytest.gen.randint(
+        1, 10
+    )  # arbitrary, from 1 to 10 (0 has own test)
     for i in range(num_fake_learner_constructs):
         cut.learner_constructs.append(MagicMock())
 
@@ -86,7 +101,11 @@ def test_LearnersInterface_update_calls_update_with_given_low_level_and_high_lev
     # Assert
     for i in range(num_fake_learner_constructs):
         assert cut.learner_constructs[i].update.call_count == 1
-        assert cut.learner_constructs[i].update.call_args_list[0].args == (arg_low_level_data, arg_high_level_data)
+        assert cut.learner_constructs[i].update.call_args_list[0].args == (
+            arg_low_level_data,
+            arg_high_level_data,
+        )
+
 
 # check_for_salient_event
 def test_LearnersInterface_salient_event_does_nothing():
@@ -99,8 +118,11 @@ def test_LearnersInterface_salient_event_does_nothing():
     # Assert
     assert result == None
 
+
 # render_reasoning tests
-def test_LearnersInterface_render_reasoning_returns_empty_dict_when_instance_learner_constructs_is_empty(mocker):
+def test_LearnersInterface_render_reasoning_returns_empty_dict_when_instance_learner_constructs_is_empty(
+    mocker,
+):
     # Arrange
     cut = LearnersInterface.__new__(LearnersInterface)
     cut.learner_constructs = []
@@ -111,21 +133,32 @@ def test_LearnersInterface_render_reasoning_returns_empty_dict_when_instance_lea
     # Assert
     assert result == {}
 
-def test_LearnersInterface_render_reasoning_returns_dict_of_each_ai_construct_as_key_to_the_result_of_its_render_reasoning_when_instance_learner_constructs_is_occupied(mocker):
+
+def test_LearnersInterface_render_reasoning_returns_dict_of_each_ai_construct_as_key_to_the_result_of_its_render_reasoning_when_instance_learner_constructs_is_occupied(
+    mocker,
+):
     # Arrange
     cut = LearnersInterface.__new__(LearnersInterface)
     cut.learner_constructs = []
     expected_result = {}
 
-    num_fake_learner_constructs = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 has own test)
+    num_fake_learner_constructs = pytest.gen.randint(
+        1, 10
+    )  # arbitrary, from 1 to 10 (0 has own test)
     for i in range(num_fake_learner_constructs):
         fake_ai_construct = MagicMock()
         forced_return_ai_construct_render_reasoning = MagicMock()
         cut.learner_constructs.append(fake_ai_construct)
-        mocker.patch.object(fake_ai_construct, 'render_reasoning', return_value=forced_return_ai_construct_render_reasoning)
+        mocker.patch.object(
+            fake_ai_construct,
+            "render_reasoning",
+            return_value=forced_return_ai_construct_render_reasoning,
+        )
         fake_ai_construct.component_name = MagicMock()
-        expected_result[fake_ai_construct.component_name] = forced_return_ai_construct_render_reasoning
+        expected_result[fake_ai_construct.component_name] = (
+            forced_return_ai_construct_render_reasoning
+        )
 
     # Act
     result = cut.render_reasoning()
@@ -134,4 +167,4 @@ def test_LearnersInterface_render_reasoning_returns_dict_of_each_ai_construct_as
     for i in range(num_fake_learner_constructs):
         assert cut.learner_constructs[i].render_reasoning.call_count == 1
         assert cut.learner_constructs[i].render_reasoning.call_args_list[0].args == ()
-    assert result == expected_result
\ No newline at end of file
+    assert result == expected_result
diff --git a/test/onair/src/ai_components/test_planners_interface.py b/test/onair/src/ai_components/test_planners_interface.py
index 62da8e64..de2f41ed 100644
--- a/test/onair/src/ai_components/test_planners_interface.py
+++ b/test/onair/src/ai_components/test_planners_interface.py
@@ -14,6 +14,7 @@
 import onair.src.ai_components.planners_interface as planners_interface
 from onair.src.ai_components.planners_interface import PlannersInterface
 
+
 # __init__ tests
 def test_PlannersInterface__init__raises_AssertionError_when_given_headers_len_is_0():
     # Arrange
@@ -28,9 +29,12 @@ def test_PlannersInterface__init__raises_AssertionError_when_given_headers_len_i
         cut.__init__(arg_headers)
 
     # Assert
-    assert e_info.match('Headers are required')
+    assert e_info.match("Headers are required")
+
 
-def test_PlannersInterface__init__sets_self_headers_to_given_headers_and_sets_self_planner_constructs_to_return_value_of_import_plugins(mocker):
+def test_PlannersInterface__init__sets_self_headers_to_given_headers_and_sets_self_planner_constructs_to_return_value_of_import_plugins(
+    mocker,
+):
     # Arrange
     arg_headers = MagicMock()
     arg__planner_plugins = MagicMock()
@@ -39,7 +43,10 @@ def test_PlannersInterface__init__sets_self_headers_to_given_headers_and_sets_se
 
     forced_return_planner_constructs = MagicMock()
 
-    mocker.patch(planners_interface.__name__ + '.import_plugins', return_value=forced_return_planner_constructs)
+    mocker.patch(
+        planners_interface.__name__ + ".import_plugins",
+        return_value=forced_return_planner_constructs,
+    )
 
     cut = PlannersInterface.__new__(PlannersInterface)
 
@@ -49,9 +56,13 @@ def test_PlannersInterface__init__sets_self_headers_to_given_headers_and_sets_se
     # Assert
     assert cut.headers == arg_headers
     assert planners_interface.import_plugins.call_count == 1
-    assert planners_interface.import_plugins.call_args_list[0].args == (arg_headers, arg__planner_plugins)
+    assert planners_interface.import_plugins.call_args_list[0].args == (
+        arg_headers,
+        arg__planner_plugins,
+    )
     assert cut.planner_constructs == forced_return_planner_constructs
 
+
 # update tests
 def test_PlannersInterface_update_does_nothing_when_instance_planner_constructs_is_empty():
     # Arrange
@@ -66,14 +77,19 @@ def test_PlannersInterface_update_does_nothing_when_instance_planner_constructs_
 
     # Assert
     assert result == None
 
-def test_PlannersInterface_update_calls_update_with_given_low_level_and_high_level_data_on_each_planner_constructs_item(mocker):
+
+def test_PlannersInterface_update_calls_update_with_given_low_level_and_high_level_data_on_each_planner_constructs_item(
+    mocker,
+):
     # Arrange
     arg_high_level_data = MagicMock()
 
     cut = PlannersInterface.__new__(PlannersInterface)
     cut.planner_constructs = []
 
-    num_fake_planner_constructs = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 has own test)
+    num_fake_planner_constructs = pytest.gen.randint(
+        1, 10
+    )  # arbitrary, from 1 to 10 (0 has own test)
     for i in range(num_fake_planner_constructs):
         cut.planner_constructs.append(MagicMock())
 
@@ -84,7 +100,10 @@ def test_PlannersInterface_update_calls_update_with_given_low_level_and_high_lev
     for i in range(num_fake_planner_constructs):
         assert cut.planner_constructs[i].update.call_count == 1
         assert cut.planner_constructs[i].update.call_args_list[0].args == ()
-        assert cut.planner_constructs[i].update.call_args_list[0].kwargs == {'high_level_data':arg_high_level_data}
+        assert cut.planner_constructs[i].update.call_args_list[0].kwargs == {
+            "high_level_data": arg_high_level_data
+        }
+
 
 # check_for_salient_event tests
 def test_PlannersInterface_check_for_salient_event_does_nothing():
@@ -97,8 +116,11 @@ def test_PlannersInterface_check_for_salient_event_does_nothing():
     # Assert
     assert result == None
 
+
 # render reasoning tests
-def test_PlannersInterface_render_reasoning_returns_empty_dict_when_instance_planner_constructs_is_empty(mocker):
+def test_PlannersInterface_render_reasoning_returns_empty_dict_when_instance_planner_constructs_is_empty(
+    mocker,
+):
     # Arrange
     cut = PlannersInterface.__new__(PlannersInterface)
     cut.planner_constructs = []
@@ -110,21 +132,31 @@ def test_PlannersInterface_render_reasoning_returns_empty_dict_when_instance_pla
     assert result == {}
 
 
-def test_PlannersInterface_render_reasoning_returns_dict_of_each_ai_construct_as_key_to_the_result_of_its_render_reasoning_when_instance_planner_constructs_is_occupied(mocker):
+def test_PlannersInterface_render_reasoning_returns_dict_of_each_ai_construct_as_key_to_the_result_of_its_render_reasoning_when_instance_planner_constructs_is_occupied(
+    mocker,
+):
     # Arrange
     cut = PlannersInterface.__new__(PlannersInterface)
     cut.planner_constructs = []
     expected_result = {}
 
-    num_fake_planner_constructs = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 has own test)
+    num_fake_planner_constructs = pytest.gen.randint(
+        1, 10
+    )  # arbitrary, from 1 to 10 (0 has own test)
     for i in range(num_fake_planner_constructs):
         fake_ai_construct = MagicMock()
         forced_return_ai_construct_render_reasoning = MagicMock()
         cut.planner_constructs.append(fake_ai_construct)
-        mocker.patch.object(fake_ai_construct, 'render_reasoning', return_value=forced_return_ai_construct_render_reasoning)
+        mocker.patch.object(
+            fake_ai_construct,
+            "render_reasoning",
+            return_value=forced_return_ai_construct_render_reasoning,
+        )
         fake_ai_construct.component_name = MagicMock()
-        expected_result[fake_ai_construct.component_name] = forced_return_ai_construct_render_reasoning
+        expected_result[fake_ai_construct.component_name] = (
+            forced_return_ai_construct_render_reasoning
+        )
 
     # Act
     result = cut.render_reasoning()
diff --git a/test/onair/src/reasoning/test_agent.py b/test/onair/src/reasoning/test_agent.py
index a1eedd88..a65d6925 100644
--- a/test/onair/src/reasoning/test_agent.py
+++ b/test/onair/src/reasoning/test_agent.py
@@ -14,8 +14,11 @@
 import onair.src.reasoning.agent as agent
 from onair.src.reasoning.agent import Agent
 
+
 # __init__ tests
-def test_Agent__init__sets_vehicle_rep_to_given_vehicle_and_learning_systems_and_mission_status_and_bayesian_status(mocker):
+def test_Agent__init__sets_vehicle_rep_to_given_vehicle_and_learning_systems_and_mission_status_and_bayesian_status(
+    mocker,
+):
     # Arrange
     arg_vehicle = MagicMock()
     arg_learners_plugin_dict = MagicMock()
@@ -23,37 +26,59 @@ def test_Agent__init__sets_vehicle_rep_to_given_vehicle_and_learning_systems_and
     arg_complex_plugin_dict = MagicMock()
 
     fake_headers = MagicMock()
-    fake_headers.__len__.return_value = 1 # Fake a header
+    fake_headers.__len__.return_value = 1  # Fake a header
     fake_learning_systems = MagicMock()
     fake_planning_systems = MagicMock()
     fake_complex_systems = MagicMock()
     fake_mission_status = MagicMock()
     fake_bayesian_status = MagicMock()
 
-    mocker.patch.object(arg_vehicle, 'get_headers', return_value=fake_headers)
-    mocker.patch.object(arg_vehicle, 'get_status', return_value=fake_mission_status)
-    mocker.patch.object(arg_vehicle, 'get_bayesian_status', return_value=fake_bayesian_status)
-    mocker.patch(agent.__name__ + '.LearnersInterface', return_value=fake_learning_systems)
-    mocker.patch(agent.__name__ + '.PlannersInterface', return_value=fake_planning_systems)
-    mocker.patch(agent.__name__ + '.ComplexReasoningInterface', return_value=fake_complex_systems)
+    mocker.patch.object(arg_vehicle, "get_headers", return_value=fake_headers)
+    mocker.patch.object(arg_vehicle, "get_status", return_value=fake_mission_status)
+    mocker.patch.object(
+        arg_vehicle, "get_bayesian_status", return_value=fake_bayesian_status
+    )
+    mocker.patch(
+        agent.__name__ + ".LearnersInterface", return_value=fake_learning_systems
+    )
+    mocker.patch(
+        agent.__name__ + ".PlannersInterface", return_value=fake_planning_systems
+    )
+    mocker.patch(
+        agent.__name__ + ".ComplexReasoningInterface", return_value=fake_complex_systems
+    )
 
     cut = Agent.__new__(Agent)
 
     # Act
-    cut.__init__(arg_vehicle, arg_learners_plugin_dict, arg_planners_plugin_dict, arg_complex_plugin_dict)
+    cut.__init__(
+        arg_vehicle,
+        arg_learners_plugin_dict,
+        arg_planners_plugin_dict,
+        arg_complex_plugin_dict,
+    )
 
     # Assert
     assert cut.vehicle_rep == arg_vehicle
     assert arg_vehicle.get_headers.call_count == 3
     assert arg_vehicle.get_headers.call_args_list[0].args == ()
     assert agent.LearnersInterface.call_count == 1
-    assert agent.LearnersInterface.call_args_list[0].args == (fake_headers, arg_learners_plugin_dict)
+    assert agent.LearnersInterface.call_args_list[0].args == (
+        fake_headers,
+        arg_learners_plugin_dict,
+    )
     assert cut.learning_systems == fake_learning_systems
     assert agent.PlannersInterface.call_count == 1
-    assert agent.PlannersInterface.call_args_list[0].args == (fake_headers, arg_planners_plugin_dict)
+    assert agent.PlannersInterface.call_args_list[0].args == (
+        fake_headers,
+        arg_planners_plugin_dict,
+    )
     assert cut.planning_systems == fake_planning_systems
     assert agent.ComplexReasoningInterface.call_count == 1
-    assert agent.ComplexReasoningInterface.call_args_list[0].args == (fake_headers, arg_complex_plugin_dict)
+    assert agent.ComplexReasoningInterface.call_args_list[0].args == (
+        fake_headers,
+        arg_complex_plugin_dict,
+    )
     assert cut.complex_reasoning_systems == fake_complex_systems
     assert arg_vehicle.get_status.call_count == 1
     assert arg_vehicle.get_status.call_args_list[0].args == ()
@@ -62,8 +87,11 @@ def test_Agent__init__sets_vehicle_rep_to_given_vehicle_and_learning_systems_and
     assert arg_vehicle.get_bayesian_status.call_args_list[0].args == ()
     assert cut.bayesian_status == fake_bayesian_status
 
+
 # reason tests
-def test_Agent_reason_updates_vehicle_rep_with_given_frame_learners_with_frame_and_aggregated_high_level_data_planners_with_aggreagated_high_level_data_returning_complex_reasonings_update_and_render_reasoning(mocker):
+def test_Agent_reason_updates_vehicle_rep_with_given_frame_learners_with_frame_and_aggregated_high_level_data_planners_with_aggreagated_high_level_data_returning_complex_reasonings_update_and_render_reasoning(
+    mocker,
+):
     # Arrange
     arg_frame = MagicMock()
     fake_vehicle_rep = MagicMock()
@@ -77,12 +105,16 @@ def test_Agent_reason_updates_vehicle_rep_with_given_frame_learners_with_frame_a
     fake_complex_reasoning_systems = MagicMock()
     fake_learning_systems_reasoning = MagicMock()
     fake_planning_systems_reasoning = MagicMock()
-    expected_aggregate_to_learners = {'vehicle_rep': fake_vehicle_rep_state}
-    expected_aggregate_to_planners = {'vehicle_rep': fake_vehicle_rep_state,
-                                      'learning_systems':fake_learning_systems_reasoning}
-    expected_aggregate_to_complex = {'vehicle_rep': fake_vehicle_rep_state,
-                                     'learning_systems':fake_learning_systems_reasoning,
-                                     'planning_systems':fake_planning_systems_reasoning}
+    expected_aggregate_to_learners = {"vehicle_rep": fake_vehicle_rep_state}
+    expected_aggregate_to_planners = {
+        "vehicle_rep": fake_vehicle_rep_state,
+        "learning_systems": fake_learning_systems_reasoning,
+    }
+    expected_aggregate_to_complex = {
+        "vehicle_rep": fake_vehicle_rep_state,
+        "learning_systems": fake_learning_systems_reasoning,
+        "planning_systems": fake_planning_systems_reasoning,
+    }
     expected_result = MagicMock()
 
     cut = Agent.__new__(Agent)
@@ -93,14 +125,49 @@ def test_Agent_reason_updates_vehicle_rep_with_given_frame_learners_with_frame_a
 
     mock_manager = mocker.MagicMock()
 
-    mock_manager.attach_mock(mocker.patch.object(fake_vehicle_rep, 'update'), 'cut.vehicle_rep.update')
-    mock_manager.attach_mock(mocker.patch.object(fake_vehicle_rep, 'get_state_information', return_value=fake_vehicle_rep_state), 'cut.vehicle_rep.get_state_information')
-    mock_manager.attach_mock(mocker.patch.object(fake_learning_systems, 'update'), 'cut.learning_systems.update')
-    mock_manager.attach_mock(mocker.patch.object(fake_learning_systems, 'render_reasoning', return_value=fake_learning_systems_reasoning), 'cut.learning_systems.render_reasoning')
-    mock_manager.attach_mock(mocker.patch.object(fake_planning_systems, 'update'), 'cut.planning_systems.update')
-    mock_manager.attach_mock(mocker.patch.object(fake_planning_systems, 'render_reasoning', return_value=fake_planning_systems_reasoning), 'cut.planning_systems.render_reasoning')
-    mock_manager.attach_mock(mocker.patch.object(fake_complex_reasoning_systems, 'update_and_render_reasoning', return_value=expected_result), 'cut.complex_reasoning_systems.update_and_render_reasoning')
-
+    mock_manager.attach_mock(
+        mocker.patch.object(fake_vehicle_rep, "update"), "cut.vehicle_rep.update"
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(
+            fake_vehicle_rep,
+            "get_state_information",
+            return_value=fake_vehicle_rep_state,
+        ),
+        "cut.vehicle_rep.get_state_information",
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(fake_learning_systems, "update"),
+        "cut.learning_systems.update",
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(
+            fake_learning_systems,
+            "render_reasoning",
+            return_value=fake_learning_systems_reasoning,
+        ),
+        "cut.learning_systems.render_reasoning",
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(fake_planning_systems, "update"),
+        "cut.planning_systems.update",
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(
+            fake_planning_systems,
+            "render_reasoning",
+            return_value=fake_planning_systems_reasoning,
+        ),
+        "cut.planning_systems.render_reasoning",
+    )
+    mock_manager.attach_mock(
+        mocker.patch.object(
+            fake_complex_reasoning_systems,
+            "update_and_render_reasoning",
+            return_value=expected_result,
+        ),
+        "cut.complex_reasoning_systems.update_and_render_reasoning",
+    )
 
     # mocker.patch.object(fake_learning_systems, 'render_reasoning', return_value=fake_learning_systems_reasoning)
     # mocker.patch.object(fake_planning_systems, 'render_reasoning', return_value=fake_planning_systems_reasoning)
@@ -110,19 +177,26 @@ def test_Agent_reason_updates_vehicle_rep_with_given_frame_learners_with_frame_a
     # Assert
     result = expected_result
-    #TODO: using expected_aggregate_to_complex is incorrect, appears to maybe be an issue with MagicMock somehow
+    # TODO: using expected_aggregate_to_complex in every call assertion is incorrect; this appears to be a limitation of how MagicMock records arguments.
     # problem is it's always the same object, which gets updated during the function; unfortunately the mock only saves the object,
     # not a "snapshot" of what the object was at the time, so each recorded call thinks it got the object (which it did), but the state is wrong
     # side_effect could be used to save the true values, but research better options
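+    # One possible fix, sketched here as an untested illustration of the
+    # side_effect idea above: take a shallow snapshot of the aggregate dict at
+    # call time, then assert against the snapshot instead of the shared,
+    # later-mutated object. `planner_update_snapshots` is a hypothetical local
+    # used only for this sketch, not part of the current test:
+    #
+    #   planner_update_snapshots = []
+    #   mocker.patch.object(
+    #       fake_planning_systems,
+    #       "update",
+    #       side_effect=lambda data: planner_update_snapshots.append(dict(data)),
+    #   )
+    #   ...
+    #   assert planner_update_snapshots[0] == expected_aggregate_to_planners
+    #
+    # A shallow dict() copy should be enough here, since the aggregate only
+    # gains new top-level keys between calls.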
mocker.call.cut.vehicle_rep.get_state_information(), + mocker.call.cut.learning_systems.update( + fake_vehicle_rep.curr_data, expected_aggregate_to_complex + ), + mocker.call.cut.learning_systems.render_reasoning(), + mocker.call.cut.planning_systems.update(expected_aggregate_to_complex), + mocker.call.cut.planning_systems.render_reasoning(), + mocker.call.cut.complex_reasoning_systems.update_and_render_reasoning( + expected_aggregate_to_complex + ), + ], + any_order=False, + ) # diagnose tests diff --git a/test/onair/src/reasoning/test_complex_resoning_interface.py b/test/onair/src/reasoning/test_complex_resoning_interface.py index 7ac10040..f14b1277 100644 --- a/test/onair/src/reasoning/test_complex_resoning_interface.py +++ b/test/onair/src/reasoning/test_complex_resoning_interface.py @@ -14,6 +14,7 @@ import onair.src.reasoning.complex_reasoning_interface as complex_reasoning_interface from onair.src.reasoning.complex_reasoning_interface import ComplexReasoningInterface + # __init__ tests def test_ComplexReasoningInterface__init__raises_AssertionError_when_given_headers_len_is_0(): # Arrange @@ -28,9 +29,12 @@ def test_ComplexReasoningInterface__init__raises_AssertionError_when_given_heade cut.__init__(arg_headers) # Assert - assert e_info.match('Headers are required') + assert e_info.match("Headers are required") + -def test_ComplexReasoningInterface__init__sets_self_headers_to_given_headers_and_sets_self_reasoning_constructs_to_return_value_of_import_plugins(mocker): +def test_ComplexReasoningInterface__init__sets_self_headers_to_given_headers_and_sets_self_reasoning_constructs_to_return_value_of_import_plugins( + mocker, +): # Arrange arg_headers = MagicMock() arg__reasoning_plugins = MagicMock() @@ -39,8 +43,10 @@ def test_ComplexReasoningInterface__init__sets_self_headers_to_given_headers_and forced_return_reasoning_constructs = MagicMock() - mocker.patch(complex_reasoning_interface.__name__ + '.import_plugins', return_value=forced_return_reasoning_constructs) - + mocker.patch( + complex_reasoning_interface.__name__ + ".import_plugins", + return_value=forced_return_reasoning_constructs, + ) cut = ComplexReasoningInterface.__new__(ComplexReasoningInterface) @@ -50,17 +56,23 @@ def test_ComplexReasoningInterface__init__sets_self_headers_to_given_headers_and # Assert assert cut.headers == arg_headers assert complex_reasoning_interface.import_plugins.call_count == 1 - assert complex_reasoning_interface.import_plugins.call_args_list[0].args == (arg_headers, arg__reasoning_plugins) + assert complex_reasoning_interface.import_plugins.call_args_list[0].args == ( + arg_headers, + arg__reasoning_plugins, + ) assert cut.reasoning_constructs == forced_return_reasoning_constructs + # update_and_render_reasoning -def test_ComplexReasoningInterface_update_and_render_reasoning_returns_given_high_level_data_with_complex_systems_as_empty_dict_when_no_reasoning_constructs(mocker): +def test_ComplexReasoningInterface_update_and_render_reasoning_returns_given_high_level_data_with_complex_systems_as_empty_dict_when_no_reasoning_constructs( + mocker, +): # Arrange - fake_high_level_key = MagicMock(name='fake_high_level_key') - fake_high_level_value = MagicMock(name='fake_high_level_value') - arg_high_level_data = {fake_high_level_key:fake_high_level_value} + fake_high_level_key = MagicMock(name="fake_high_level_key") + fake_high_level_value = MagicMock(name="fake_high_level_value") + arg_high_level_data = {fake_high_level_key: fake_high_level_value} expected_result = arg_high_level_data - 
expected_result.update({'complex_systems':{}}) + expected_result.update({"complex_systems": {}}) cut = ComplexReasoningInterface.__new__(ComplexReasoningInterface) cut.reasoning_constructs = [] @@ -71,23 +83,30 @@ def test_ComplexReasoningInterface_update_and_render_reasoning_returns_given_hig # Assert assert result == expected_result -def test_ComplexReasoningInterface_update_and_render_reasoning_invokes_on_all_reasoning_constructs_then_returns_their_results_added_to_the_hgih_level_data(mocker): + +def test_ComplexReasoningInterface_update_and_render_reasoning_invokes_on_all_reasoning_constructs_then_returns_their_results_added_to_the_hgih_level_data( + mocker, +): # Arrange - fake_high_level_key = MagicMock(name='fake_high_level_key') - fake_high_level_value = MagicMock(name='fake_high_level_value') - arg_high_level_data = {fake_high_level_key:fake_high_level_value} + fake_high_level_key = MagicMock(name="fake_high_level_key") + fake_high_level_value = MagicMock(name="fake_high_level_value") + arg_high_level_data = {fake_high_level_key: fake_high_level_value} expected_result = arg_high_level_data - expected_result.update({'complex_systems':{}}) + expected_result.update({"complex_systems": {}}) cut = ComplexReasoningInterface.__new__(ComplexReasoningInterface) cut.reasoning_constructs = [] for i in range(0, pytest.gen.randint(1, 10)): cut.reasoning_constructs.append(MagicMock(name=f"fake_plugin_{i}")) cut.reasoning_constructs[-1].component_name = f"fake_plugin_{i}" - mocker.patch.object(cut.reasoning_constructs[-1], 'update') + mocker.patch.object(cut.reasoning_constructs[-1], "update") rv = f"{i}" - mocker.patch.object(cut.reasoning_constructs[-1], 'render_reasoning', return_value=rv) - expected_result['complex_systems'].update({cut.reasoning_constructs[-1].component_name : rv}) + mocker.patch.object( + cut.reasoning_constructs[-1], "render_reasoning", return_value=rv + ) + expected_result["complex_systems"].update( + {cut.reasoning_constructs[-1].component_name: rv} + ) # Act result = cut.update_and_render_reasoning(arg_high_level_data) @@ -96,6 +115,7 @@ def test_ComplexReasoningInterface_update_and_render_reasoning_invokes_on_all_re assert result == expected_result assert cut.reasoning_constructs[0].update.call_count == 1 + # check_for_salient_event tests def test_ComplexReasoningInterface_salient_event_does_nothing(): # Arrange diff --git a/test/onair/src/reasoning/test_diagnosis.py b/test/onair/src/reasoning/test_diagnosis.py index d0078d7d..c058a1b6 100644 --- a/test/onair/src/reasoning/test_diagnosis.py +++ b/test/onair/src/reasoning/test_diagnosis.py @@ -14,6 +14,7 @@ import onair.src.reasoning.diagnosis as diagnosis from onair.src.reasoning.diagnosis import Diagnosis + # __init__ tests def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg_learning_system_results_is_empty_dict(): # Assert @@ -26,11 +27,13 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg cut = Diagnosis.__new__(Diagnosis) # Act - result = cut.__init__(fake_timestep, - fake_learning_system_results, - fake_status_confidence, - fake_currently_faulting_mnemonics, - fake_ground_truth) + result = cut.__init__( + fake_timestep, + fake_learning_system_results, + fake_status_confidence, + fake_currently_faulting_mnemonics, + fake_ground_truth, + ) # Assert assert cut.time_step == fake_timestep @@ -41,11 +44,14 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg assert cut.has_kalman == False assert cut.kalman_results == None + def 
test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg_learning_system_results_is_non_empty_and_does_not_contain_kalman_plugin(): # Arrange fake_timestep = MagicMock() fake_learning_system_results = {} - num_learning_system_results = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + num_learning_system_results = pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 for i in range(num_learning_system_results): fake_learning_system_results[MagicMock()] = MagicMock() fake_status_confidence = MagicMock() @@ -55,11 +61,13 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg cut = Diagnosis.__new__(Diagnosis) # Act - result = cut.__init__(fake_timestep, - fake_learning_system_results, - fake_status_confidence, - fake_currently_faulting_mnemonics, - fake_ground_truth) + result = cut.__init__( + fake_timestep, + fake_learning_system_results, + fake_status_confidence, + fake_currently_faulting_mnemonics, + fake_ground_truth, + ) # Assert assert cut.time_step == fake_timestep @@ -70,15 +78,18 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg assert cut.has_kalman == False assert cut.kalman_results == None + def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg_learning_system_results_is_non_empty_and_contains_kalman_plugin(): # Arrange fake_timestep = MagicMock() fake_learning_system_results = {} - num_learning_system_results = pytest.gen.randint(0, 10) # arbitrary, random int from 0 to 10 + num_learning_system_results = pytest.gen.randint( + 0, 10 + ) # arbitrary, random int from 0 to 10 for i in range(num_learning_system_results): fake_learning_system_results[MagicMock()] = MagicMock() fake_kalman_results = MagicMock() - fake_learning_system_results['kalman'] = fake_kalman_results + fake_learning_system_results["kalman"] = fake_kalman_results fake_status_confidence = MagicMock() fake_currently_faulting_mnemonics = MagicMock() fake_ground_truth = MagicMock() @@ -86,11 +97,13 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg cut = Diagnosis.__new__(Diagnosis) # Act - result = cut.__init__(fake_timestep, - fake_learning_system_results, - fake_status_confidence, - fake_currently_faulting_mnemonics, - fake_ground_truth) + result = cut.__init__( + fake_timestep, + fake_learning_system_results, + fake_status_confidence, + fake_currently_faulting_mnemonics, + fake_ground_truth, + ) # Assert assert cut.time_step == fake_timestep @@ -101,6 +114,7 @@ def test_Diagnosis__init__initializes_all_attributes_to_expected_values_when_arg assert cut.has_kalman == True assert cut.kalman_results == fake_kalman_results + # perform_diagnosis tests def test_Diagnosis_perform_diagnosis_returns_empty_Dict_when_has_kalman_is_False(): # Arrange @@ -114,7 +128,10 @@ def test_Diagnosis_perform_diagnosis_returns_empty_Dict_when_has_kalman_is_False assert type(result) == dict assert result == {} -def test_Diagnosis_perform_diagnosis_returns_dict_of_str_top_and_walkdown_of_random_mnemonic_when_has_kalman_is_True(mocker): + +def test_Diagnosis_perform_diagnosis_returns_dict_of_str_top_and_walkdown_of_random_mnemonic_when_has_kalman_is_True( + mocker, +): # Arrange fake_kalman_results = MagicMock() @@ -125,32 +142,40 @@ def test_Diagnosis_perform_diagnosis_returns_dict_of_str_top_and_walkdown_of_ran forced_list_return_value = MagicMock() forced_random_choice_return_value = MagicMock() forced_walkdown_return_value = MagicMock() - 
mocker.patch(diagnosis.__name__ + '.list', return_value=forced_list_return_value) - mocker.patch(diagnosis.__name__ + '.random.choice', return_value=forced_random_choice_return_value) - mocker.patch.object(cut, 'walkdown', return_value=forced_walkdown_return_value) + mocker.patch(diagnosis.__name__ + ".list", return_value=forced_list_return_value) + mocker.patch( + diagnosis.__name__ + ".random.choice", + return_value=forced_random_choice_return_value, + ) + mocker.patch.object(cut, "walkdown", return_value=forced_walkdown_return_value) # Act result = cut.perform_diagnosis() # Assert assert type(result) == dict - assert result == {'top' : forced_walkdown_return_value} + assert result == {"top": forced_walkdown_return_value} assert diagnosis.list.call_count == 1 - assert diagnosis.list.call_args_list[0].args == (fake_kalman_results[0], ) + assert diagnosis.list.call_args_list[0].args == (fake_kalman_results[0],) assert diagnosis.random.choice.call_count == 1 - assert diagnosis.random.choice.call_args_list[0].args == (forced_list_return_value, ) + assert diagnosis.random.choice.call_args_list[0].args == (forced_list_return_value,) assert cut.walkdown.call_count == 1 - assert cut.walkdown.call_args_list[0].args == (forced_random_choice_return_value, ) + assert cut.walkdown.call_args_list[0].args == (forced_random_choice_return_value,) + # walkdown tests -def test_Diagnosis_walkdown_returns_expected_value_and_does_not_call_copy_deepcopy_function_when_used_mnemonics_is_not_empty_and_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name(mocker): +def test_Diagnosis_walkdown_returns_expected_value_and_does_not_call_copy_deepcopy_function_when_used_mnemonics_is_not_empty_and_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name( + mocker, +): # Arrange arg_mnemonic_name = str(MagicMock()) - num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_used_mnemonics = [MagicMock()] * num_used_mnemonics - fake_kalman_results = [MagicMock()] * pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_kalman_results = [MagicMock()] * pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 + len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 fake_kalman_results[0] = [MagicMock()] * len_fake_list expected_result = fake_kalman_results[0][0] @@ -159,7 +184,7 @@ def test_Diagnosis_walkdown_returns_expected_value_and_does_not_call_copy_deepco cut.has_kalman = True cut.kalman_results = fake_kalman_results - mocker.patch(diagnosis.__name__ + '.copy.deepcopy') + mocker.patch(diagnosis.__name__ + ".copy.deepcopy") # Act result = cut.walkdown(arg_mnemonic_name, arg_used_mnemonics) @@ -168,13 +193,18 @@ def test_Diagnosis_walkdown_returns_expected_value_and_does_not_call_copy_deepco assert result == expected_result assert diagnosis.copy.deepcopy.call_count == 0 -def test_Diagnosis_walkdown_returns_expected_value_and_calls_copy_deepcopy_function_when_used_mnemonics_is_empty_and_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name(mocker): + +def 
test_Diagnosis_walkdown_returns_expected_value_and_calls_copy_deepcopy_function_when_used_mnemonics_is_empty_and_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name( + mocker, +): # Arrange arg_mnemonic_name = str(MagicMock()) arg_used_mnemonics = [] - fake_kalman_results = [MagicMock()] * pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_kalman_results = [MagicMock()] * pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 + len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 fake_kalman_results[0] = [MagicMock()] * len_fake_list fake_currently_faulting_mnemonics = MagicMock() @@ -185,7 +215,7 @@ def test_Diagnosis_walkdown_returns_expected_value_and_calls_copy_deepcopy_funct cut.has_kalman = True cut.kalman_results = fake_kalman_results - mocker.patch(diagnosis.__name__ + '.copy.deepcopy') + mocker.patch(diagnosis.__name__ + ".copy.deepcopy") # Act result = cut.walkdown(arg_mnemonic_name, arg_used_mnemonics) @@ -193,26 +223,35 @@ def test_Diagnosis_walkdown_returns_expected_value_and_calls_copy_deepcopy_funct # Assert assert result == expected_result assert diagnosis.copy.deepcopy.call_count == 1 - assert diagnosis.copy.deepcopy.call_args_list[0].args == (fake_currently_faulting_mnemonics, ) + assert diagnosis.copy.deepcopy.call_args_list[0].args == ( + fake_currently_faulting_mnemonics, + ) + -def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_contains_mnemonic_name(mocker): +def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank_and_has_kalman_is_True_and_kalman_results_contains_mnemonic_name( + mocker, +): # Arrange arg_mnemonic_name = str(MagicMock()) - num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_used_mnemonics = [MagicMock()] * num_used_mnemonics - fake_kalman_results = [MagicMock()] * pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_kalman_results = [MagicMock()] * pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 + len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 fake_kalman_results[0] = [MagicMock()] * len_fake_list - rand_name_index = pytest.gen.randint(0, len_fake_list - 1) # random index in fake_kalman_results[0] + rand_name_index = pytest.gen.randint( + 0, len_fake_list - 1 + ) # random index in fake_kalman_results[0] fake_kalman_results[0][rand_name_index] = arg_mnemonic_name cut = Diagnosis.__new__(Diagnosis) cut.has_kalman = True cut.kalman_results = fake_kalman_results - mocker.patch(diagnosis.__name__ + '.copy.deepcopy') + mocker.patch(diagnosis.__name__ + ".copy.deepcopy") # Act result = cut.walkdown(arg_mnemonic_name, arg_used_mnemonics) @@ -221,21 +260,26 @@ def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank assert result == Diagnosis.NO_DIAGNOSIS assert diagnosis.copy.deepcopy.call_count == 0 -def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank_and_has_kalman_is_False_and_kalman_results_does_not_contain_mnemonic_name(mocker): + +def 
test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank_and_has_kalman_is_False_and_kalman_results_does_not_contain_mnemonic_name( + mocker, +): # Arrange arg_mnemonic_name = str(MagicMock()) - num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_used_mnemonics = [MagicMock()] * num_used_mnemonics - fake_kalman_results = [MagicMock()] * pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_kalman_results = [MagicMock()] * pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 + len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 fake_kalman_results[0] = [MagicMock()] * len_fake_list cut = Diagnosis.__new__(Diagnosis) cut.has_kalman = False cut.kalman_results = fake_kalman_results - mocker.patch(diagnosis.__name__ + '.copy.deepcopy') + mocker.patch(diagnosis.__name__ + ".copy.deepcopy") # Act result = cut.walkdown(arg_mnemonic_name, arg_used_mnemonics) @@ -244,21 +288,26 @@ def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_not_blank assert result == Diagnosis.NO_DIAGNOSIS assert diagnosis.copy.deepcopy.call_count == 0 -def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name(mocker): + +def test_Diagnosis_walkdown_returns_NO_DIAGNOSIS_when_mnemonic_name_is_blank_and_has_kalman_is_True_and_kalman_results_does_not_contain_mnemonic_name( + mocker, +): # Arrange - arg_mnemonic_name = '' - num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + arg_mnemonic_name = "" + num_used_mnemonics = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_used_mnemonics = [MagicMock()] * num_used_mnemonics - fake_kalman_results = [MagicMock()] * pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_kalman_results = [MagicMock()] * pytest.gen.randint( + 1, 10 + ) # arbitrary, random int from 1 to 10 + len_fake_list = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 fake_kalman_results[0] = [MagicMock()] * len_fake_list cut = Diagnosis.__new__(Diagnosis) cut.has_kalman = True cut.kalman_results = fake_kalman_results - mocker.patch(diagnosis.__name__ + '.copy.deepcopy') + mocker.patch(diagnosis.__name__ + ".copy.deepcopy") # Act result = cut.walkdown(arg_mnemonic_name, arg_used_mnemonics) diff --git a/test/onair/src/run_scripts/test_execution_engine.py b/test/onair/src/run_scripts/test_execution_engine.py index 2baa4ebb..d5555b2f 100644 --- a/test/onair/src/run_scripts/test_execution_engine.py +++ b/test/onair/src/run_scripts/test_execution_engine.py @@ -17,18 +17,20 @@ # __init__ tests -def test_ExecutionEngine__init__sets_expected_values_but_does_no_calls_when_config_file_is_empty_string(mocker): +def test_ExecutionEngine__init__sets_expected_values_but_does_no_calls_when_config_file_is_empty_string( + mocker, +): # Arrange - arg_config_file = '' + arg_config_file = "" arg_run_name = MagicMock() arg_save_flag = MagicMock() cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.object(cut, 'init_save_paths') - mocker.patch.object(cut, 'parse_configs') - mocker.patch.object(cut, 'parse_data') - mocker.patch.object(cut, 'setup_sim') + mocker.patch.object(cut, 
"init_save_paths") + mocker.patch.object(cut, "parse_configs") + mocker.patch.object(cut, "parse_data") + mocker.patch.object(cut, "setup_sim") # Act cut.__init__(arg_config_file, arg_run_name, arg_save_flag) @@ -36,13 +38,13 @@ def test_ExecutionEngine__init__sets_expected_values_but_does_no_calls_when_conf # Assert assert cut.run_name == arg_run_name assert cut.IO_Enabled == False - assert cut.dataFilePath == '' - assert cut.telemetryFile == '' - assert cut.fullTelemetryFile == '' - assert cut.metadataFilePath == '' - assert cut.metaFile == '' - assert cut.fullMetaFile == '' - assert cut.data_source_file == '' + assert cut.dataFilePath == "" + assert cut.telemetryFile == "" + assert cut.fullTelemetryFile == "" + assert cut.metadataFilePath == "" + assert cut.metaFile == "" + assert cut.fullMetaFile == "" + assert cut.data_source_file == "" assert cut.simDataSource == None assert cut.sim == None assert cut.save_flag == arg_save_flag @@ -53,7 +55,9 @@ def test_ExecutionEngine__init__sets_expected_values_but_does_no_calls_when_conf assert cut.setup_sim.call_count == 0 -def test_ExecutionEngine__init__does_calls_when_config_file_is_an_occupied_string(mocker): +def test_ExecutionEngine__init__does_calls_when_config_file_is_an_occupied_string( + mocker, +): # Arrange arg_config_file = str(MagicMock()) arg_run_name = MagicMock() @@ -61,10 +65,10 @@ def test_ExecutionEngine__init__does_calls_when_config_file_is_an_occupied_strin cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.object(cut, 'init_save_paths') - mocker.patch.object(cut, 'parse_configs') - mocker.patch.object(cut, 'parse_data') - mocker.patch.object(cut, 'setup_sim') + mocker.patch.object(cut, "init_save_paths") + mocker.patch.object(cut, "parse_configs") + mocker.patch.object(cut, "parse_data") + mocker.patch.object(cut, "setup_sim") # Act cut.__init__(arg_config_file, arg_run_name, arg_save_flag) @@ -72,35 +76,43 @@ def test_ExecutionEngine__init__does_calls_when_config_file_is_an_occupied_strin # Assert assert cut.init_save_paths.call_count == 1 assert cut.parse_configs.call_count == 1 - assert cut.parse_configs.call_args_list[0].args == (arg_config_file, ) + assert cut.parse_configs.call_args_list[0].args == (arg_config_file,) assert cut.parse_data.call_count == 1 assert cut.parse_data.call_args_list[0].args == ( - cut.data_source_file, cut.dataFilePath, cut.metadataFilePath, ) + cut.data_source_file, + cut.dataFilePath, + cut.metadataFilePath, + ) assert cut.setup_sim.call_count == 1 -def test_ExecutionEngine__init__accepts_no_arguments_using_defaults_instead_with_config_file_default_as_empty_string(mocker): +def test_ExecutionEngine__init__accepts_no_arguments_using_defaults_instead_with_config_file_default_as_empty_string( + mocker, +): # Arrange cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.object(cut, 'init_save_paths') - mocker.patch.object(cut, 'parse_configs') - mocker.patch.object(cut, 'parse_data') - mocker.patch.object(cut, 'setup_sim') + mocker.patch.object(cut, "init_save_paths") + mocker.patch.object(cut, "parse_configs") + mocker.patch.object(cut, "parse_data") + mocker.patch.object(cut, "setup_sim") # Act cut.__init__() # Assert - assert cut.run_name == '' + assert cut.run_name == "" assert cut.save_flag == False - assert cut.save_name == '' + assert cut.save_name == "" assert cut.init_save_paths.call_count == 0 + # parse_configs tests -def test_ExecutionEngine_parse_configs_raises_FileNotFoundError_when_config_cannot_be_read(mocker): +def 
test_ExecutionEngine_parse_configs_raises_FileNotFoundError_when_config_cannot_be_read( + mocker, +): # Arrange arg_bad_config_filepath = MagicMock() @@ -110,29 +122,33 @@ def test_ExecutionEngine_parse_configs_raises_FileNotFoundError_when_config_cann cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.configparser.ConfigParser', return_value=fake_config) - mocker.patch.object(fake_config, 'read', - return_value=fake_config_read_result) + mocker.patch( + execution_engine.__name__ + ".configparser.ConfigParser", + return_value=fake_config, + ) + mocker.patch.object(fake_config, "read", return_value=fake_config_read_result) # Act with pytest.raises(FileNotFoundError) as e_info: cut.parse_configs(arg_bad_config_filepath) # Assert - assert e_info.match(f"Config file at '{ - arg_bad_config_filepath}' could not be read.") + assert e_info.match( + f"Config file at '{arg_bad_config_filepath}' could not be read." + ) -def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_when_the_required_key_FILES_is_not_in_config(mocker): +def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_when_the_required_key_FILES_is_not_in_config( + mocker, +): # Arrange arg_config_filepath = MagicMock() - missing_key = 'FILES' + missing_key = "FILES" fake_dict_for_Config = { "DATA_HANDLING": MagicMock(), "PLUGINS": MagicMock(), - "OPTIONS": MagicMock() + "OPTIONS": MagicMock(), } fake_config = MagicMock() fake_config.__getitem__.side_effect = fake_dict_for_Config.__getitem__ @@ -141,11 +157,12 @@ def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_whe cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.configparser.ConfigParser', return_value=fake_config) - mocker.patch.object(fake_config, 'read', - return_value=fake_config_read_result) - mocker.patch.object(cut, 'parse_plugins_dict', return_value=None) + mocker.patch( + execution_engine.__name__ + ".configparser.ConfigParser", + return_value=fake_config, + ) + mocker.patch.object(fake_config, "read", return_value=fake_config_read_result) + mocker.patch.object(cut, "parse_plugins_dict", return_value=None) # Act with pytest.raises(KeyError) as e_info: @@ -153,27 +170,28 @@ def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_whe # Assert assert e_info.match( - f"Config file: '{arg_config_filepath}', missing key: {missing_key}") + f"Config file: '{arg_config_filepath}', missing key: {missing_key}" + ) -def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_when_a_required_FILES_subkey_is_not_in_config(mocker): +def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_when_a_required_FILES_subkey_is_not_in_config( + mocker, +): # Arrange arg_config_filepath = MagicMock() fake_files = { - 'TelemetryFilePath': MagicMock(), - 'TelemetryFile': MagicMock(), - 'MetaFilePath': MagicMock(), - 'MetaFile': MagicMock() - } - fake_data_handling = { - 'DataSourceFile': MagicMock() + "TelemetryFilePath": MagicMock(), + "TelemetryFile": MagicMock(), + "MetaFilePath": MagicMock(), + "MetaFile": MagicMock(), } + fake_data_handling = {"DataSourceFile": MagicMock()} fake_plugins = { - 'KnowledgeRepPluginDict': "{fake_name:fake_path}", - 'LearnersPluginDict': "{fake_name:fake_path}", - 'PlannersPluginDict': "{fake_name:fake_path}", - 'ComplexPluginDict': "{fake_name:fake_path}" + "KnowledgeRepPluginDict": "{fake_name:fake_path}", + "LearnersPluginDict": 
"{fake_name:fake_path}", + "PlannersPluginDict": "{fake_name:fake_path}", + "ComplexPluginDict": "{fake_name:fake_path}", } required_keys = [item for item in list(fake_files.keys())] missing_key = pytest.gen.choice(required_keys) @@ -183,7 +201,7 @@ def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_whe "FILES": fake_files, "DATA_HANDLING": fake_data_handling, "PLUGINS": fake_plugins, - "OPTIONS": fake_options + "OPTIONS": fake_options, } fake_config = MagicMock() fake_config.__getitem__.side_effect = fake_dict_for_Config.__getitem__ @@ -192,11 +210,12 @@ def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_whe cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.configparser.ConfigParser', return_value=fake_config) - mocker.patch.object(fake_config, 'read', - return_value=fake_config_read_result) - mocker.patch.object(cut, 'parse_plugins_dict', return_value=None) + mocker.patch( + execution_engine.__name__ + ".configparser.ConfigParser", + return_value=fake_config, + ) + mocker.patch.object(fake_config, "read", return_value=fake_config_read_result) + mocker.patch.object(cut, "parse_plugins_dict", return_value=None) # Act with pytest.raises(KeyError) as e_info: @@ -204,26 +223,27 @@ def test_ExecutionEngine_parse_configs_raises_KeyError_with_config_file_info_whe # Assert assert e_info.match( - f"Config file: '{arg_config_filepath}', missing key: {missing_key}") + f"Config file: '{arg_config_filepath}', missing key: {missing_key}" + ) -def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_OPTIONS_is_not_in_config(mocker): +def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_OPTIONS_is_not_in_config( + mocker, +): # Arrange arg_config_filepath = MagicMock() fake_files = { - 'TelemetryFilePath': MagicMock(), - 'TelemetryFile': MagicMock(), - 'MetaFilePath': MagicMock(), - 'MetaFile': MagicMock() - } - fake_data_handling = { - 'DataSourceFile': MagicMock() + "TelemetryFilePath": MagicMock(), + "TelemetryFile": MagicMock(), + "MetaFilePath": MagicMock(), + "MetaFile": MagicMock(), } + fake_data_handling = {"DataSourceFile": MagicMock()} fake_plugins = { - 'KnowledgeRepPluginDict': "{fake_name:fake_path}", - 'LearnersPluginDict': "{fake_name:fake_path}", - 'PlannersPluginDict': "{fake_name:fake_path}", - 'ComplexPluginDict': "{fake_name:fake_path}" + "KnowledgeRepPluginDict": "{fake_name:fake_path}", + "LearnersPluginDict": "{fake_name:fake_path}", + "PlannersPluginDict": "{fake_name:fake_path}", + "ComplexPluginDict": "{fake_name:fake_path}", } fake_plugin_dict = MagicMock() fake_plugin_dict.body = MagicMock() @@ -232,7 +252,7 @@ def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_O fake_dict_for_Config = { "FILES": fake_files, "DATA_HANDLING": fake_data_handling, - "PLUGINS": fake_plugins + "PLUGINS": fake_plugins, } fake_config = MagicMock() fake_config.__getitem__.side_effect = fake_dict_for_Config.__getitem__ @@ -242,13 +262,12 @@ def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_O fake_learners_plugin_list = MagicMock() fake_planners_plugin_list = MagicMock() fake_complex_plugin_list = MagicMock() - fake_plugins = [fake_knowledge_rep_plugin_list, - fake_learners_plugin_list, - fake_planners_plugin_list, - fake_complex_plugin_list] - fake_IO_enabled = MagicMock() - fake_Dev_enabled = MagicMock() - fake_Viz_enabled = MagicMock() + fake_plugins = [ + fake_knowledge_rep_plugin_list, + 
fake_learners_plugin_list, + fake_planners_plugin_list, + fake_complex_plugin_list, + ] fake_plugin_dict = MagicMock() fake_keys = MagicMock() fake_plugin = MagicMock() @@ -257,20 +276,19 @@ def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_O fake_keys.__len__.return_value = 1 fake_keys.__iter__.return_value = iter([str(fake_plugin)]) - cut = ExecutionEngine.__new__(ExecutionEngine) + cut = ExecutionEngine() - mocker.patch(execution_engine.__name__ + - '.configparser.ConfigParser', return_value=fake_config) - mocker.patch.object(fake_config, 'read', - return_value=fake_config_read_result) + mocker.patch( + execution_engine.__name__ + ".configparser.ConfigParser", + return_value=fake_config, + ) + mocker.patch.object(fake_config, "read", return_value=fake_config_read_result) mocker.patch.object(fake_config, "has_section", return_value=False) - mocker.patch.object(cut, 'parse_plugins_dict', side_effect=fake_plugins) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + - '.os.path.exists', return_value=True) - mocker.patch.object(fake_plugin_dict, 'keys', return_value=fake_keys) - mocker.patch.object(fake_plugin_dict, '__getitem__', - return_value=fake_path) + mocker.patch.object(cut, "parse_plugins_dict", side_effect=fake_plugins) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=True) + mocker.patch(execution_engine.__name__ + ".os.path.exists", return_value=True) + mocker.patch.object(fake_plugin_dict, "keys", return_value=fake_keys) + mocker.patch.object(fake_plugin_dict, "__getitem__", return_value=fake_path) # Act cut.parse_configs(arg_config_filepath) @@ -278,11 +296,11 @@ def test_ExecutionEngine_parse_configs_skips_OPTIONS_when_the_required_section_O # Assert assert execution_engine.configparser.ConfigParser.call_count == 1 assert fake_config.read.call_count == 1 - assert cut.dataFilePath == fake_files['TelemetryFilePath'] - assert cut.telemetryFile == fake_files['TelemetryFile'] - assert cut.metadataFilePath == fake_files['MetaFilePath'] - assert cut.metaFile == fake_files['MetaFile'] - assert cut.data_source_file == fake_data_handling['DataSourceFile'] + assert cut.dataFilePath == fake_files["TelemetryFilePath"] + assert cut.telemetryFile == fake_files["TelemetryFile"] + assert cut.metadataFilePath == fake_files["MetaFilePath"] + assert cut.metaFile == fake_files["MetaFile"] + assert cut.data_source_file == fake_data_handling["DataSourceFile"] assert cut.parse_plugins_dict.call_count == 4 assert cut.knowledge_rep_plugin_dict == fake_knowledge_rep_plugin_list assert cut.learners_plugin_dict == fake_learners_plugin_list @@ -295,19 +313,17 @@ def test_ExecutionEngine_parse_configs_sets_all_items_without_error(mocker): # Arrange arg_config_filepath = MagicMock() fake_files = { - 'TelemetryFilePath': MagicMock(), - 'TelemetryFile': MagicMock(), - 'MetaFilePath': MagicMock(), - 'MetaFile': MagicMock() - } - fake_data_handling = { - 'DataSourceFile': MagicMock() + "TelemetryFilePath": MagicMock(), + "TelemetryFile": MagicMock(), + "MetaFilePath": MagicMock(), + "MetaFile": MagicMock(), } + fake_data_handling = {"DataSourceFile": MagicMock()} fake_plugins = { - 'KnowledgeRepPluginDict': "{fake_name:fake_path}", - 'LearnersPluginDict': "{fake_name:fake_path}", - 'PlannersPluginDict': "{fake_name:fake_path}", - 'ComplexPluginDict': "{fake_name:fake_path}" + "KnowledgeRepPluginDict": "{fake_name:fake_path}", + "LearnersPluginDict": "{fake_name:fake_path}", + 
"PlannersPluginDict": "{fake_name:fake_path}", + "ComplexPluginDict": "{fake_name:fake_path}", } fake_options = MagicMock() fake_plugin_dict = MagicMock() @@ -318,7 +334,7 @@ def test_ExecutionEngine_parse_configs_sets_all_items_without_error(mocker): "FILES": fake_files, "DATA_HANDLING": fake_data_handling, "PLUGINS": fake_plugins, - "OPTIONS": fake_options + "OPTIONS": fake_options, } fake_config = MagicMock() fake_config.__getitem__.side_effect = fake_dict_for_Config.__getitem__ @@ -328,13 +344,13 @@ def test_ExecutionEngine_parse_configs_sets_all_items_without_error(mocker): fake_learners_plugin_list = MagicMock() fake_planners_plugin_list = MagicMock() fake_complex_plugin_list = MagicMock() - fake_plugins = [fake_knowledge_rep_plugin_list, - fake_learners_plugin_list, - fake_planners_plugin_list, - fake_complex_plugin_list] + fake_plugins = [ + fake_knowledge_rep_plugin_list, + fake_learners_plugin_list, + fake_planners_plugin_list, + fake_complex_plugin_list, + ] fake_IO_enabled = MagicMock() - fake_Dev_enabled = MagicMock() - fake_Viz_enabled = MagicMock() fake_plugin_dict = MagicMock() fake_keys = MagicMock() fake_plugin = MagicMock() @@ -345,20 +361,18 @@ def test_ExecutionEngine_parse_configs_sets_all_items_without_error(mocker): cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.configparser.ConfigParser', return_value=fake_config) - mocker.patch.object(fake_config, 'read', - return_value=fake_config_read_result) + mocker.patch( + execution_engine.__name__ + ".configparser.ConfigParser", + return_value=fake_config, + ) + mocker.patch.object(fake_config, "read", return_value=fake_config_read_result) mocker.patch.object(fake_config, "has_section", return_value=True) - mocker.patch.object(cut, 'parse_plugins_dict', side_effect=fake_plugins) - mocker.patch.object(fake_options, 'getboolean', - return_value=fake_IO_enabled) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + - '.os.path.exists', return_value=True) - mocker.patch.object(fake_plugin_dict, 'keys', return_value=fake_keys) - mocker.patch.object(fake_plugin_dict, '__getitem__', - return_value=fake_path) + mocker.patch.object(cut, "parse_plugins_dict", side_effect=fake_plugins) + mocker.patch.object(fake_options, "getboolean", return_value=fake_IO_enabled) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=True) + mocker.patch(execution_engine.__name__ + ".os.path.exists", return_value=True) + mocker.patch.object(fake_plugin_dict, "keys", return_value=fake_keys) + mocker.patch.object(fake_plugin_dict, "__getitem__", return_value=fake_path) # Act cut.parse_configs(arg_config_filepath) @@ -366,24 +380,27 @@ def test_ExecutionEngine_parse_configs_sets_all_items_without_error(mocker): # Assert assert execution_engine.configparser.ConfigParser.call_count == 1 assert fake_config.read.call_count == 1 - assert cut.dataFilePath == fake_files['TelemetryFilePath'] - assert cut.telemetryFile == fake_files['TelemetryFile'] - assert cut.metadataFilePath == fake_files['MetaFilePath'] - assert cut.metaFile == fake_files['MetaFile'] - assert cut.data_source_file == fake_data_handling['DataSourceFile'] + assert cut.dataFilePath == fake_files["TelemetryFilePath"] + assert cut.telemetryFile == fake_files["TelemetryFile"] + assert cut.metadataFilePath == fake_files["MetaFilePath"] + assert cut.metaFile == fake_files["MetaFile"] + assert cut.data_source_file == fake_data_handling["DataSourceFile"] assert 
cut.parse_plugins_dict.call_count == 4 assert cut.knowledge_rep_plugin_dict == fake_knowledge_rep_plugin_list assert cut.learners_plugin_dict == fake_learners_plugin_list assert cut.planners_plugin_dict == fake_planners_plugin_list assert cut.complex_plugin_dict == fake_complex_plugin_list assert fake_options.getboolean.call_count == 1 - assert fake_options.getboolean.call_args_list[0].args == ('IO_Enabled', ) + assert fake_options.getboolean.call_args_list[0].args == ("IO_Enabled",) assert cut.IO_Enabled == fake_IO_enabled + # parse_plugins_dict -def test_ExecutionEngine_parse_plugins_list_raises_ValueError_when_config_plugin_dict_is_not_dict(mocker): +def test_ExecutionEngine_parse_plugins_list_raises_ValueError_when_config_plugin_dict_is_not_dict( + mocker, +): # Arrange arg_config_plugin_dict = MagicMock() @@ -394,25 +411,29 @@ def test_ExecutionEngine_parse_plugins_list_raises_ValueError_when_config_plugin cut = ExecutionEngine.__new__(ExecutionEngine) cut.config_filepath = fake_config_filepath - mocker.patch.object(cut, 'ast_parse_eval', return_value=fake_plugin_dict) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=False) + mocker.patch.object(cut, "ast_parse_eval", return_value=fake_plugin_dict) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=False) # Act with pytest.raises(ValueError) as e_info: cut.parse_plugins_dict(arg_config_plugin_dict) # Assert - assert e_info.match(f"Plugin dict {arg_config_plugin_dict} from { - fake_config_filepath} is invalid. It must be a dict.") + assert e_info.match( + f"Plugin dict {arg_config_plugin_dict} from {fake_config_filepath} is invalid. It must be a dict." + ) assert cut.ast_parse_eval.call_count == 1 - assert cut.ast_parse_eval.call_args_list[0].args == ( - arg_config_plugin_dict,) + assert cut.ast_parse_eval.call_args_list[0].args == (arg_config_plugin_dict,) assert execution_engine.isinstance.call_count == 1 assert execution_engine.isinstance.call_args_list[0].args == ( - fake_plugin_dict.body, execution_engine.ast.Dict, ) + fake_plugin_dict.body, + execution_engine.ast.Dict, + ) -def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_single_config_plugin_dict_key_maps_to_non_existing_file(mocker): +def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_single_config_plugin_dict_key_maps_to_non_existing_file( + mocker, +): # Arrange arg_config_plugin_dict = MagicMock() @@ -428,39 +449,43 @@ def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_single cut = ExecutionEngine.__new__(ExecutionEngine) cut.config_filepath = fake_config_filepath - mocker.patch.object(cut, 'ast_parse_eval', return_value=fake_plugin_dict) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + '.ast.literal_eval', - return_value=fake_temp_plugin_dict) - mocker.patch.object(fake_temp_plugin_dict, 'values', - return_value=fake_values) - mocker.patch(execution_engine.__name__ + - '.os.path.exists', return_value=False) + mocker.patch.object(cut, "ast_parse_eval", return_value=fake_plugin_dict) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=True) + mocker.patch( + execution_engine.__name__ + ".ast.literal_eval", + return_value=fake_temp_plugin_dict, + ) + mocker.patch.object(fake_temp_plugin_dict, "values", return_value=fake_values) + mocker.patch(execution_engine.__name__ + ".os.path.exists", return_value=False) # Act with pytest.raises(FileNotFoundError) as e_info: 
cut.parse_plugins_dict(arg_config_plugin_dict) # Assert - assert e_info.match(f"In config file '{fake_config_filepath}' Plugin path '{ - fake_path}' does not exist.") + assert e_info.match( + f"In config file '{fake_config_filepath}' Plugin path '{fake_path}' does not exist." + ) assert cut.ast_parse_eval.call_count == 1 - assert cut.ast_parse_eval.call_args_list[0].args == ( - arg_config_plugin_dict,) + assert cut.ast_parse_eval.call_args_list[0].args == (arg_config_plugin_dict,) assert execution_engine.isinstance.call_count == 1 assert execution_engine.isinstance.call_args_list[0].args == ( - fake_plugin_dict.body, execution_engine.ast.Dict, ) + fake_plugin_dict.body, + execution_engine.ast.Dict, + ) assert execution_engine.ast.literal_eval.call_count == 1 assert execution_engine.ast.literal_eval.call_args_list[0].args == ( - arg_config_plugin_dict, ) + arg_config_plugin_dict, + ) assert fake_temp_plugin_dict.values.call_count == 1 assert fake_temp_plugin_dict.values.call_args_list[0].args == () assert execution_engine.os.path.exists.call_count == 1 - assert execution_engine.os.path.exists.call_args_list[0].args == ( - fake_path, ) + assert execution_engine.os.path.exists.call_args_list[0].args == (fake_path,) -def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_any_config_plugin_dict_key_maps_to_non_existing_file(mocker): +def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_any_config_plugin_dict_key_maps_to_non_existing_file( + mocker, +): # Arrange arg_config_plugin_dict = MagicMock() @@ -472,7 +497,7 @@ def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_any_co fake_path = MagicMock() # from 2 to 10 arbitrary, 1 has own test num_fake_items = pytest.gen.randint(2, 10) - num_fake_existing_files = pytest.gen.randint(1, num_fake_items-1) + num_fake_existing_files = pytest.gen.randint(1, num_fake_items - 1) exists_side_effects = [True] * num_fake_existing_files exists_side_effects.append(False) @@ -481,38 +506,41 @@ def test_ExecutionEngine_parse_plugins_list_raises_FileNotFoundError_when_any_co cut = ExecutionEngine.__new__(ExecutionEngine) cut.config_filepath = fake_config_filepath - mocker.patch.object(cut, 'ast_parse_eval', return_value=fake_plugin_dict) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + '.ast.literal_eval', - return_value=fake_temp_plugin_dict) - mocker.patch.object(fake_temp_plugin_dict, 'values', - return_value=fake_values) - mocker.patch(execution_engine.__name__ + '.os.path.exists', - side_effect=exists_side_effects) + mocker.patch.object(cut, "ast_parse_eval", return_value=fake_plugin_dict) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=True) + mocker.patch( + execution_engine.__name__ + ".ast.literal_eval", + return_value=fake_temp_plugin_dict, + ) + mocker.patch.object(fake_temp_plugin_dict, "values", return_value=fake_values) + mocker.patch( + execution_engine.__name__ + ".os.path.exists", side_effect=exists_side_effects + ) # Act with pytest.raises(FileNotFoundError) as e_info: cut.parse_plugins_dict(arg_config_plugin_dict) # Assert - assert e_info.match(f"In config file '{fake_config_filepath}' Plugin path '{ - fake_path}' does not exist.") + assert e_info.match( + f"In config file '{fake_config_filepath}' Plugin path '{fake_path}' does not exist." 
+ ) assert cut.ast_parse_eval.call_count == 1 - assert cut.ast_parse_eval.call_args_list[0].args == ( - arg_config_plugin_dict,) + assert cut.ast_parse_eval.call_args_list[0].args == (arg_config_plugin_dict,) assert execution_engine.isinstance.call_count == 1 assert execution_engine.isinstance.call_args_list[0].args == ( - fake_plugin_dict.body, execution_engine.ast.Dict, ) + fake_plugin_dict.body, + execution_engine.ast.Dict, + ) assert execution_engine.ast.literal_eval.call_count == 1 assert execution_engine.ast.literal_eval.call_args_list[0].args == ( - arg_config_plugin_dict, ) + arg_config_plugin_dict, + ) assert fake_temp_plugin_dict.values.call_count == 1 assert fake_temp_plugin_dict.values.call_args_list[0].args == () - assert execution_engine.os.path.exists.call_count == len( - exists_side_effects) + assert execution_engine.os.path.exists.call_count == len(exists_side_effects) for i in range(len(exists_side_effects)): - assert execution_engine.os.path.exists.call_args_list[i].args == ( - fake_path, ) + assert execution_engine.os.path.exists.call_args_list[i].args == (fake_path,) def test_ExecutionEngine_returns_empty_dict_when_config_dict_is_empty(mocker): @@ -527,10 +555,12 @@ def test_ExecutionEngine_returns_empty_dict_when_config_dict_is_empty(mocker): cut = ExecutionEngine.__new__(ExecutionEngine) cut.config_filepath = fake_config_filepath - mocker.patch.object(cut, 'ast_parse_eval', return_value=fake_plugin_dict) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + '.ast.literal_eval', - return_value=fake_temp_plugin_dict) + mocker.patch.object(cut, "ast_parse_eval", return_value=fake_plugin_dict) + mocker.patch(execution_engine.__name__ + ".isinstance", return_value=True) + mocker.patch( + execution_engine.__name__ + ".ast.literal_eval", + return_value=fake_temp_plugin_dict, + ) # Act result = cut.parse_plugins_dict(arg_config_plugin_dict) @@ -538,14 +568,16 @@ def test_ExecutionEngine_returns_empty_dict_when_config_dict_is_empty(mocker): # Assert assert result == {} assert cut.ast_parse_eval.call_count == 1 - assert cut.ast_parse_eval.call_args_list[0].args == ( - arg_config_plugin_dict,) + assert cut.ast_parse_eval.call_args_list[0].args == (arg_config_plugin_dict,) assert execution_engine.isinstance.call_count == 1 assert execution_engine.isinstance.call_args_list[0].args == ( - fake_plugin_dict.body, execution_engine.ast.Dict, ) + fake_plugin_dict.body, + execution_engine.ast.Dict, + ) assert execution_engine.ast.literal_eval.call_count == 1 assert execution_engine.ast.literal_eval.call_args_list[0].args == ( - arg_config_plugin_dict, ) + arg_config_plugin_dict, + ) def test_ExecutionEngine_returns_expected_dict_when_all_mapped_files_exist(mocker): @@ -567,14 +599,16 @@ def test_ExecutionEngine_returns_expected_dict_when_all_mapped_files_exist(mocke cut = ExecutionEngine.__new__(ExecutionEngine) cut.config_filepath = fake_config_filepath - mocker.patch.object(cut, 'ast_parse_eval', return_value=fake_plugin_dict) - mocker.patch(execution_engine.__name__ + '.isinstance', return_value=True) - mocker.patch(execution_engine.__name__ + '.ast.literal_eval', - return_value=fake_temp_plugin_dict) - mocker.patch.object(fake_temp_plugin_dict, 'values', - return_value=fake_values) - mocker.patch(execution_engine.__name__ + '.os.path.exists', - side_effect=exists_side_effects) + mocker.patch.object(cut, "ast_parse_eval", return_value=fake_plugin_dict) + mocker.patch(execution_engine.__name__ + ".isinstance", 
return_value=True) + mocker.patch( + execution_engine.__name__ + ".ast.literal_eval", + return_value=fake_temp_plugin_dict, + ) + mocker.patch.object(fake_temp_plugin_dict, "values", return_value=fake_values) + mocker.patch( + execution_engine.__name__ + ".os.path.exists", side_effect=exists_side_effects + ) # Act result = cut.parse_plugins_dict(arg_config_plugin_dict) @@ -582,26 +616,29 @@ def test_ExecutionEngine_returns_expected_dict_when_all_mapped_files_exist(mocke # Assert assert result == fake_temp_plugin_dict assert cut.ast_parse_eval.call_count == 1 - assert cut.ast_parse_eval.call_args_list[0].args == ( - arg_config_plugin_dict,) + assert cut.ast_parse_eval.call_args_list[0].args == (arg_config_plugin_dict,) assert execution_engine.isinstance.call_count == 1 assert execution_engine.isinstance.call_args_list[0].args == ( - fake_plugin_dict.body, execution_engine.ast.Dict, ) + fake_plugin_dict.body, + execution_engine.ast.Dict, + ) assert execution_engine.ast.literal_eval.call_count == 1 assert execution_engine.ast.literal_eval.call_args_list[0].args == ( - arg_config_plugin_dict, ) + arg_config_plugin_dict, + ) assert fake_temp_plugin_dict.values.call_count == 1 assert fake_temp_plugin_dict.values.call_args_list[0].args == () - assert execution_engine.os.path.exists.call_count == len( - exists_side_effects) + assert execution_engine.os.path.exists.call_count == len(exists_side_effects) for i in range(len(exists_side_effects)): - assert execution_engine.os.path.exists.call_args_list[i].args == ( - fake_path, ) + assert execution_engine.os.path.exists.call_args_list[i].args == (fake_path,) + # parse_data tests -def test_ExecutionEngine_parse_data_sets_the_simDataSource_to_a_new_data_source_module_DataSource(mocker): +def test_ExecutionEngine_parse_data_sets_the_simDataSource_to_a_new_data_source_module_DataSource( + mocker, +): # Arrange arg_parser_file_name = MagicMock() arg_dataFile = str(MagicMock()) @@ -614,33 +651,49 @@ def test_ExecutionEngine_parse_data_sets_the_simDataSource_to_a_new_data_source_ cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.importlib.util.spec_from_file_location', return_value=fake_spec) - mocker.patch(execution_engine.__name__ + - '.importlib.util.module_from_spec', return_value=fake_module) - mocker.patch.object(fake_spec, 'loader.exec_module', return_value=None) - mocker.patch.object(fake_module, 'DataSource', - return_value=fake_parser_class_instance) + mocker.patch( + execution_engine.__name__ + ".importlib.util.spec_from_file_location", + return_value=fake_spec, + ) + mocker.patch( + execution_engine.__name__ + ".importlib.util.module_from_spec", + return_value=fake_module, + ) + mocker.patch.object(fake_spec, "loader.exec_module", return_value=None) + mocker.patch.object( + fake_module, "DataSource", return_value=fake_parser_class_instance + ) # Act - cut.parse_data(arg_parser_file_name, arg_dataFile, - arg_metadataFile, arg_subsystems_breakdown) + cut.parse_data( + arg_parser_file_name, arg_dataFile, arg_metadataFile, arg_subsystems_breakdown + ) # Assert assert execution_engine.importlib.util.spec_from_file_location.call_count == 1 - assert execution_engine.importlib.util.spec_from_file_location.call_args_list[0].args == ( - 'data_source', arg_parser_file_name, ) + assert execution_engine.importlib.util.spec_from_file_location.call_args_list[ + 0 + ].args == ( + "data_source", + arg_parser_file_name, + ) assert execution_engine.importlib.util.module_from_spec.call_count == 1 assert 
execution_engine.importlib.util.module_from_spec.call_args_list[0].args == ( - fake_spec, ) + fake_spec, + ) assert fake_spec.loader.exec_module.call_count == 1 assert fake_module.DataSource.call_count == 1 assert fake_module.DataSource.call_args_list[0].args == ( - arg_dataFile, arg_metadataFile, arg_subsystems_breakdown, ) + arg_dataFile, + arg_metadataFile, + arg_subsystems_breakdown, + ) assert cut.simDataSource == fake_parser_class_instance -def test_ExecutionEngine_parse_data_argument_subsystems_breakdown_optional_default_is_False(mocker): +def test_ExecutionEngine_parse_data_argument_subsystems_breakdown_optional_default_is_False( + mocker, +): # Arrange arg_parser_file_name = MagicMock() arg_dataFile = MagicMock() @@ -662,13 +715,18 @@ def __init__(self, data_file, meta_file, subsystems_breakdown): cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.importlib.util.spec_from_file_location', return_value=fake_spec) - mocker.patch(execution_engine.__name__ + - '.importlib.util.module_from_spec', return_value=fake_module) - mocker.patch.object(fake_spec, '.loader.exec_module', return_value=None) - mocker.patch.object(fake_module, '.DataSource', - return_value=fake_parser_class_instance) + mocker.patch( + execution_engine.__name__ + ".importlib.util.spec_from_file_location", + return_value=fake_spec, + ) + mocker.patch( + execution_engine.__name__ + ".importlib.util.module_from_spec", + return_value=fake_module, + ) + mocker.patch.object(fake_spec, ".loader.exec_module", return_value=None) + mocker.patch.object( + fake_module, ".DataSource", return_value=fake_parser_class_instance + ) # Act cut.parse_data(arg_parser_file_name, arg_dataFile, arg_metadataFile) @@ -676,7 +734,11 @@ def __init__(self, data_file, meta_file, subsystems_breakdown): # Assert assert fake_module.DataSource.call_count == 1 assert fake_module.DataSource.call_args_list[0].args == ( - arg_dataFile, arg_metadataFile, False, ) + arg_dataFile, + arg_metadataFile, + False, + ) + # setup_sim tests @@ -692,40 +754,44 @@ def test_ExecutionEngine_setup_sim_sets_self_sim_to_new_Simulator(mocker): fake_sim = MagicMock() - mocker.patch(execution_engine.__name__ + - '.Simulator', return_value=fake_sim) + mocker.patch(execution_engine.__name__ + ".Simulator", return_value=fake_sim) # Act cut.setup_sim() # Assert assert execution_engine.Simulator.call_count == 1 - assert execution_engine.Simulator.call_args_list[0].args == (cut.simDataSource, - cut.knowledge_rep_plugin_dict, - cut.learners_plugin_dict, - cut.planners_plugin_dict, - cut.complex_plugin_dict) + assert execution_engine.Simulator.call_args_list[0].args == ( + cut.simDataSource, + cut.knowledge_rep_plugin_dict, + cut.learners_plugin_dict, + cut.planners_plugin_dict, + cut.complex_plugin_dict, + ) assert cut.sim == fake_sim + # run_sim tests -def test_ExecutionEngine_run_sim_runs_but_does_not_save_results_when_save_flag_is_False(mocker): +def test_ExecutionEngine_run_sim_runs_but_does_not_save_results_when_save_flag_is_False( + mocker, +): # Arrange cut = ExecutionEngine.__new__(ExecutionEngine) cut.sim = MagicMock() cut.IO_Enabled = MagicMock() cut.save_flag = False - mocker.patch.object(cut.sim, 'run_sim') - mocker.patch.object(cut, 'save_results') + mocker.patch.object(cut.sim, "run_sim") + mocker.patch.object(cut, "save_results") # Act cut.run_sim() # Assert assert cut.sim.run_sim.call_count == 1 - assert cut.sim.run_sim.call_args_list[0].args == (cut.IO_Enabled, ) + assert cut.sim.run_sim.call_args_list[0].args == 
(cut.IO_Enabled,) assert cut.save_results.call_count == 0 @@ -737,36 +803,41 @@ def test_ExecutionEngine_run_sim_runs_and_saves_results_when_save_flag_is_True(m cut.save_flag = True cut.save_name = MagicMock() - mocker.patch.object(cut.sim, 'run_sim') - mocker.patch.object(cut, 'save_results') + mocker.patch.object(cut.sim, "run_sim") + mocker.patch.object(cut, "save_results") # Act cut.run_sim() # Assert assert cut.sim.run_sim.call_count == 1 - assert cut.sim.run_sim.call_args_list[0].args == (cut.IO_Enabled, ) + assert cut.sim.run_sim.call_args_list[0].args == (cut.IO_Enabled,) assert cut.save_results.call_count == 1 - assert cut.save_results.call_args_list[0].args == (cut.save_name, ) + assert cut.save_results.call_args_list[0].args == (cut.save_name,) + # init_save_paths tests -def test_ExecutionEngine_init_save_paths_makes_tmp_and_models_and_diagnosis_directories_and_adds_them_to_os_environ(mocker): +def test_ExecutionEngine_init_save_paths_makes_tmp_and_models_and_diagnosis_directories_and_adds_them_to_os_environ( + mocker, +): # Arrange fake_save_path = str(MagicMock()) - fake_environ = {'RESULTS_PATH': fake_save_path} + fake_environ = {"RESULTS_PATH": fake_save_path} fake_tmp_save_path = str(MagicMock()) fake_tmp_models_path = str(MagicMock()) fake_tmp_diagnosis_path = str(MagicMock()) cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ) - mocker.patch(execution_engine.__name__ + '.os.path.join', - side_effect=[fake_tmp_save_path, fake_tmp_models_path, fake_tmp_diagnosis_path]) - mocker.patch.object(cut, 'delete_save_paths') - mocker.patch(execution_engine.__name__ + '.os.mkdir') + mocker.patch.dict(execution_engine.__name__ + ".os.environ", fake_environ) + mocker.patch( + execution_engine.__name__ + ".os.path.join", + side_effect=[fake_tmp_save_path, fake_tmp_models_path, fake_tmp_diagnosis_path], + ) + mocker.patch.object(cut, "delete_save_paths") + mocker.patch(execution_engine.__name__ + ".os.mkdir") # Act cut.init_save_paths() @@ -776,24 +847,36 @@ def test_ExecutionEngine_init_save_paths_makes_tmp_and_models_and_diagnosis_dire assert execution_engine.os.path.join.call_count == 3 # NOTE: similar problem with the args lists, bad expected values do not error nicely with good outputs, so beware but correct values pass assert execution_engine.os.path.join.call_args_list[0].args == ( - fake_save_path, 'tmp') + fake_save_path, + "tmp", + ) assert execution_engine.os.path.join.call_args_list[1].args == ( - fake_tmp_save_path, 'models') + fake_tmp_save_path, + "models", + ) assert execution_engine.os.path.join.call_args_list[2].args == ( - fake_tmp_save_path, 'diagnosis') + fake_tmp_save_path, + "diagnosis", + ) # NOTE: apparently the problem persists to other failures because these asserts have the same problem, bad values error, but not correct outputs, good values pass - assert execution_engine.os.environ['ONAIR_SAVE_PATH'] == fake_save_path - assert execution_engine.os.environ['ONAIR_TMP_SAVE_PATH'] == fake_tmp_save_path - assert execution_engine.os.environ['ONAIR_MODELS_SAVE_PATH'] == fake_tmp_models_path - assert execution_engine.os.environ['ONAIR_DIAGNOSIS_SAVE_PATH'] == fake_tmp_diagnosis_path + assert execution_engine.os.environ["ONAIR_SAVE_PATH"] == fake_save_path + assert execution_engine.os.environ["ONAIR_TMP_SAVE_PATH"] == fake_tmp_save_path + assert execution_engine.os.environ["ONAIR_MODELS_SAVE_PATH"] == fake_tmp_models_path + assert ( + execution_engine.os.environ["ONAIR_DIAGNOSIS_SAVE_PATH"] + == 
fake_tmp_diagnosis_path + ) + # delete_save_path tests -def test_ExecutionEngine_delete_save_paths_does_nothing_when_save_path_has_no_tmp_dir(mocker): +def test_ExecutionEngine_delete_save_paths_does_nothing_when_save_path_has_no_tmp_dir( + mocker, +): # Arrange fake_save_path = str(MagicMock()) - fake_environ = {'RESULTS_PATH': fake_save_path} + fake_environ = {"RESULTS_PATH": fake_save_path} fake_dirs = [] for i in range(pytest.gen.randint(0, 5)): # 0 to 5 @@ -801,95 +884,102 @@ def test_ExecutionEngine_delete_save_paths_does_nothing_when_save_path_has_no_tm cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ) - mocker.patch(execution_engine.__name__ + - '.os.listdir', return_value=fake_dirs) - mocker.patch(execution_engine.__name__ + '.shutil.rmtree') + mocker.patch.dict(execution_engine.__name__ + ".os.environ", fake_environ) + mocker.patch(execution_engine.__name__ + ".os.listdir", return_value=fake_dirs) + mocker.patch(execution_engine.__name__ + ".shutil.rmtree") # Act cut.delete_save_paths() # Assert assert execution_engine.os.listdir.call_count == 1 - assert execution_engine.os.listdir.call_args_list[0].args == ( - fake_save_path, ) + assert execution_engine.os.listdir.call_args_list[0].args == (fake_save_path,) assert execution_engine.shutil.rmtree.call_count == 0 def test_ExecutionEngine_delete_save_paths_removes_tmp_tree_when_it_exists(mocker): # Arrange fake_save_path = str(MagicMock()) - fake_environ = {'RESULTS_PATH': fake_save_path} + fake_environ = {"RESULTS_PATH": fake_save_path} fake_dirs = [] for i in range(pytest.gen.randint(0, 5)): # 0 to 5 fake_dirs.append(str(MagicMock())) - fake_dirs.append('tmp') + fake_dirs.append("tmp") for i in range(pytest.gen.randint(0, 5)): # 0 to 5 fake_dirs.append(str(MagicMock())) cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ) - mocker.patch(execution_engine.__name__ + - '.os.listdir', return_value=fake_dirs) - mocker.patch(execution_engine.__name__ + '.shutil.rmtree') - mocker.patch(execution_engine.__name__ + '.print') + mocker.patch.dict(execution_engine.__name__ + ".os.environ", fake_environ) + mocker.patch(execution_engine.__name__ + ".os.listdir", return_value=fake_dirs) + mocker.patch(execution_engine.__name__ + ".shutil.rmtree") + mocker.patch(execution_engine.__name__ + ".print") # Act cut.delete_save_paths() # Assert assert execution_engine.os.listdir.call_count == 1 - assert execution_engine.os.listdir.call_args_list[0].args == ( - fake_save_path, ) + assert execution_engine.os.listdir.call_args_list[0].args == (fake_save_path,) assert execution_engine.shutil.rmtree.call_count == 1 assert execution_engine.shutil.rmtree.call_args_list[0].args == ( - fake_save_path + '/tmp', ) + fake_save_path + "/tmp", + ) assert execution_engine.print.call_count == 0 -def test_ExecutionEngine_delete_save_paths_prints_error_message_when_rmtree_raises_OSError(mocker): +def test_ExecutionEngine_delete_save_paths_prints_error_message_when_rmtree_raises_OSError( + mocker, +): # Arrange fake_save_path = str(MagicMock()) - fake_environ = {'RESULTS_PATH': fake_save_path} + fake_environ = {"RESULTS_PATH": fake_save_path} fake_dirs = [] fake_error_message = str(MagicMock()) for i in range(pytest.gen.randint(0, 5)): # 0 to 5 fake_dirs.append(str(MagicMock())) - fake_dirs.append('tmp') + fake_dirs.append("tmp") for i in range(pytest.gen.randint(0, 5)): # 0 to 5 fake_dirs.append(str(MagicMock())) cut = 
ExecutionEngine.__new__(ExecutionEngine) - mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ) - mocker.patch(execution_engine.__name__ + - '.os.listdir', return_value=fake_dirs) - mocker.patch(execution_engine.__name__ + '.shutil.rmtree', side_effect=OSError( - # 0 to 10 arbitrary error value for errno - pytest.gen.randint(0, 10), fake_error_message)) - mocker.patch(execution_engine.__name__ + '.print') + mocker.patch.dict(execution_engine.__name__ + ".os.environ", fake_environ) + mocker.patch(execution_engine.__name__ + ".os.listdir", return_value=fake_dirs) + mocker.patch( + execution_engine.__name__ + ".shutil.rmtree", + side_effect=OSError( + # 0 to 10 arbitrary error value for errno + pytest.gen.randint(0, 10), + fake_error_message, + ), + ) + mocker.patch(execution_engine.__name__ + ".print") # Act cut.delete_save_paths() # Assert assert execution_engine.os.listdir.call_count == 1 - assert execution_engine.os.listdir.call_args_list[0].args == ( - fake_save_path, ) + assert execution_engine.os.listdir.call_args_list[0].args == (fake_save_path,) assert execution_engine.shutil.rmtree.call_count == 1 assert execution_engine.shutil.rmtree.call_args_list[0].args == ( - fake_save_path + '/tmp', ) + fake_save_path + "/tmp", + ) assert execution_engine.print.call_count == 1 assert execution_engine.print.call_args_list[0].args == ( - ("Error: " + fake_save_path + " : " + fake_error_message), ) + ("Error: " + fake_save_path + " : " + fake_error_message), + ) + # save_results tests -def test_ExecutionEngine_save_results_creates_expected_save_path_and_copies_proper_tree_to_it(mocker): +def test_ExecutionEngine_save_results_creates_expected_save_path_and_copies_proper_tree_to_it( + mocker, +): # Arrange arg_save_name = str(MagicMock()) @@ -897,20 +987,23 @@ def test_ExecutionEngine_save_results_creates_expected_save_path_and_copies_prop fake_complete_time = str(MagicMock()) fake_onair_save_path = str(MagicMock()) fake_onair_tmp_save_path = str(MagicMock()) - fake_environ = {'ONAIR_SAVE_PATH': fake_onair_save_path, - 'ONAIR_TMP_SAVE_PATH': fake_onair_tmp_save_path} - fake_save_path = fake_onair_save_path + 'saved/' + \ - arg_save_name + '_' + fake_complete_time + fake_environ = { + "ONAIR_SAVE_PATH": fake_onair_save_path, + "ONAIR_TMP_SAVE_PATH": fake_onair_tmp_save_path, + } + fake_save_path = ( + fake_onair_save_path + "saved/" + arg_save_name + "_" + fake_complete_time + ) cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + - '.gmtime', return_value=fake_gmtime) - mocker.patch(execution_engine.__name__ + '.strftime', - return_value=fake_complete_time) - mocker.patch.dict(execution_engine.__name__ + '.os.environ', fake_environ) - mocker.patch(execution_engine.__name__ + '.os.makedirs') - mocker.patch(execution_engine.__name__ + '.copy_tree') + mocker.patch(execution_engine.__name__ + ".gmtime", return_value=fake_gmtime) + mocker.patch( + execution_engine.__name__ + ".strftime", return_value=fake_complete_time + ) + mocker.patch.dict(execution_engine.__name__ + ".os.environ", fake_environ) + mocker.patch(execution_engine.__name__ + ".os.makedirs") + mocker.patch(execution_engine.__name__ + ".copytree") # Act cut.save_results(arg_save_name) @@ -920,15 +1013,18 @@ def test_ExecutionEngine_save_results_creates_expected_save_path_and_copies_prop assert execution_engine.gmtime.call_args_list[0].args == () assert execution_engine.strftime.call_count == 1 assert execution_engine.strftime.call_args_list[0].args == ( - "%H-%M-%S", fake_gmtime,) 
+ "%H-%M-%S", + fake_gmtime, + ) assert execution_engine.os.makedirs.call_count == 1 - assert execution_engine.os.makedirs.call_args_list[0].args == ( - fake_save_path, ) - assert execution_engine.os.makedirs.call_args_list[0].kwargs == { - "exist_ok": True} - assert execution_engine.copy_tree.call_count == 1 - assert execution_engine.copy_tree.call_args_list[0].args == ( - fake_onair_tmp_save_path, fake_save_path, ) + assert execution_engine.os.makedirs.call_args_list[0].args == (fake_save_path,) + assert execution_engine.os.makedirs.call_args_list[0].kwargs == {"exist_ok": True} + assert execution_engine.copytree.call_count == 1 + assert execution_engine.copytree.call_args_list[0].args == ( + fake_onair_tmp_save_path, + fake_save_path, + ) + # set_run_param tests @@ -939,7 +1035,7 @@ def test_ExecutionEngine_set_run_param_passes_given_arguments_to_setattr(mocker) arg_val = MagicMock() cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + '.setattr') + mocker.patch(execution_engine.__name__ + ".setattr") # Act cut.set_run_param(arg_name, arg_val) @@ -947,19 +1043,24 @@ def test_ExecutionEngine_set_run_param_passes_given_arguments_to_setattr(mocker) # Assert assert execution_engine.setattr.call_count == 1 assert execution_engine.setattr.call_args_list[0].args == ( - cut, arg_name, arg_val, ) + cut, + arg_name, + arg_val, + ) + # ast_parse_eval tests -def test_ExecutionEngine_ast_parse_eval_returns_call_to_ast_parse_with_mode_eval(mocker): +def test_ExecutionEngine_ast_parse_eval_returns_call_to_ast_parse_with_mode_eval( + mocker, +): # Arrange arg_config_list = MagicMock() expected_result = MagicMock() cut = ExecutionEngine.__new__(ExecutionEngine) - mocker.patch(execution_engine.__name__ + ".ast.parse", - return_value=expected_result) + mocker.patch(execution_engine.__name__ + ".ast.parse", return_value=expected_result) # Act result = cut.ast_parse_eval(arg_config_list) @@ -967,7 +1068,5 @@ def test_ExecutionEngine_ast_parse_eval_returns_call_to_ast_parse_with_mode_eval # Assert assert result == expected_result assert execution_engine.ast.parse.call_count == 1 - assert execution_engine.ast.parse.call_args_list[0].args == ( - arg_config_list, ) - assert execution_engine.ast.parse.call_args_list[0].kwargs == { - 'mode': 'eval'} + assert execution_engine.ast.parse.call_args_list[0].args == (arg_config_list,) + assert execution_engine.ast.parse.call_args_list[0].kwargs == {"mode": "eval"} diff --git a/test/onair/src/run_scripts/test_sim.py b/test/onair/src/run_scripts/test_sim.py index 320d9b5f..050df80b 100644 --- a/test/onair/src/run_scripts/test_sim.py +++ b/test/onair/src/run_scripts/test_sim.py @@ -16,10 +16,12 @@ from math import ceil, floor + # constants tests def test_Simulator_DIAGNOSIS_INTERVAL_is_expected_value(): assert sim.DIAGNOSIS_INTERVAL == 100 + # __init__ tests def test_Simulator__init__creates_Vehicle_and_Agent(mocker): # Arrange @@ -37,27 +39,43 @@ def test_Simulator__init__creates_Vehicle_and_Agent(mocker): cut = Simulator.__new__(Simulator) - mocker.patch.object(arg_dataSource, 'get_vehicle_metadata', return_value=fake_vehicle_metadata) - mocker.patch(sim.__name__ + '.VehicleRepresentation', return_value=fake_vehicle) - mocker.patch(sim.__name__ + '.Agent', return_value=fake_agent) + mocker.patch.object( + arg_dataSource, "get_vehicle_metadata", return_value=fake_vehicle_metadata + ) + mocker.patch(sim.__name__ + ".VehicleRepresentation", return_value=fake_vehicle) + mocker.patch(sim.__name__ + ".Agent", return_value=fake_agent) # Act 
- cut.__init__(arg_dataSource, - arg_knowledge_rep_plugin_list, - arg_learners_plugin_list, - arg_planners_plugin_list, - arg_complex_plugin_list) + cut.__init__( + arg_dataSource, + arg_knowledge_rep_plugin_list, + arg_learners_plugin_list, + arg_planners_plugin_list, + arg_complex_plugin_list, + ) # Assert assert cut.simData == arg_dataSource assert sim.VehicleRepresentation.call_count == 1 - assert sim.VehicleRepresentation.call_args_list[0].args == (fake_headers, fake_tests, arg_knowledge_rep_plugin_list) + assert sim.VehicleRepresentation.call_args_list[0].args == ( + fake_headers, + fake_tests, + arg_knowledge_rep_plugin_list, + ) assert sim.Agent.call_count == 1 - assert sim.Agent.call_args_list[0].args == (fake_vehicle, arg_learners_plugin_list, arg_planners_plugin_list, arg_complex_plugin_list) + assert sim.Agent.call_args_list[0].args == ( + fake_vehicle, + arg_learners_plugin_list, + arg_planners_plugin_list, + arg_complex_plugin_list, + ) assert cut.agent == fake_agent + # run_sim tests -def test_Simulator_run_sim_simData_never_has_more_so_loop_does_not_run_and_diagnosis_list_is_empty_but_filled_with_agent_diagnose_and_returns_last_diagnosis(mocker): +def test_Simulator_run_sim_simData_never_has_more_so_loop_does_not_run_and_diagnosis_list_is_empty_but_filled_with_agent_diagnose_and_returns_last_diagnosis( + mocker, +): # Arrange cut = Simulator.__new__(Simulator) cut.simData = MagicMock() @@ -66,10 +84,10 @@ def test_Simulator_run_sim_simData_never_has_more_so_loop_does_not_run_and_diagn fake_diagnosis = MagicMock() fake_time_step = 0 - mocker.patch(sim.__name__ + '.print_sim_header') - mocker.patch(sim.__name__ + '.print_msg') - mocker.patch.object(cut.simData, 'has_more', return_value=False) - mocker.patch.object(cut.agent, 'diagnose', return_value=fake_diagnosis) + mocker.patch(sim.__name__ + ".print_sim_header") + mocker.patch(sim.__name__ + ".print_msg") + mocker.patch.object(cut.simData, "has_more", return_value=False) + mocker.patch.object(cut.agent, "diagnose", return_value=fake_diagnosis) # Act result = cut.run_sim() @@ -80,9 +98,10 @@ def test_Simulator_run_sim_simData_never_has_more_so_loop_does_not_run_and_diagn assert cut.simData.has_more.call_count == 1 assert cut.simData.has_more.call_args_list[0].args == () assert cut.agent.diagnose.call_count == 1 - assert cut.agent.diagnose.call_args_list[0].args == (fake_time_step, ) + assert cut.agent.diagnose.call_args_list[0].args == (fake_time_step,) assert result == fake_diagnosis + def test_Simulator_run_sim_prints_header_when_given_IO_Flag_is_equal_to_True(mocker): # Arrange cut = Simulator.__new__(Simulator) @@ -91,10 +110,10 @@ def test_Simulator_run_sim_prints_header_when_given_IO_Flag_is_equal_to_True(moc fake_diagnosis = MagicMock() - mocker.patch(sim.__name__ + '.print_sim_header') - mocker.patch(sim.__name__ + '.print_msg') - mocker.patch.object(cut.simData, 'has_more', return_value=False) - mocker.patch.object(cut.agent, 'diagnose', return_value=fake_diagnosis) + mocker.patch(sim.__name__ + ".print_sim_header") + mocker.patch(sim.__name__ + ".print_msg") + mocker.patch.object(cut.simData, "has_more", return_value=False) + mocker.patch.object(cut.agent, "diagnose", return_value=fake_diagnosis) # Act result = cut.run_sim(True) @@ -103,7 +122,8 @@ def test_Simulator_run_sim_prints_header_when_given_IO_Flag_is_equal_to_True(moc assert sim.print_sim_header.call_count == 1 assert sim.print_sim_header.call_args_list[0].args == () assert sim.print_msg.call_count == 0 - assert result == fake_diagnosis # check we ran 
through the method correctly + assert result == fake_diagnosis # check we ran through the method correctly + def test_Simulator_run_sim_runs_until_has_more_is_false(mocker): # Arrange @@ -111,20 +131,20 @@ def test_Simulator_run_sim_runs_until_has_more_is_false(mocker): cut.simData = MagicMock() cut.agent = MagicMock() - num_fake_steps = pytest.gen.randint(1, 100) # from 1 to 100 arbitrary for fast test + num_fake_steps = pytest.gen.randint(1, 100) # from 1 to 100 arbitrary for fast test fake_diagnosis = MagicMock() fake_next = MagicMock() fake_IO_Flag = MagicMock() side_effects_for_has_more = [True] * (num_fake_steps) + [False] - mocker.patch(sim.__name__ + '.print_sim_header') - mocker.patch(sim.__name__ + '.print_msg') - mocker.patch.object(cut.simData, 'has_more', side_effect=side_effects_for_has_more) - mocker.patch.object(cut.simData, 'get_next', return_value=fake_next) - mocker.patch.object(cut.agent, 'reason') - mocker.patch.object(cut, 'IO_check') - mocker.patch.object(cut.agent, 'mission_status', MagicMock()) # never equals 'RED' - mocker.patch.object(cut.agent, 'diagnose', return_value=fake_diagnosis) + mocker.patch(sim.__name__ + ".print_sim_header") + mocker.patch(sim.__name__ + ".print_msg") + mocker.patch.object(cut.simData, "has_more", side_effect=side_effects_for_has_more) + mocker.patch.object(cut.simData, "get_next", return_value=fake_next) + mocker.patch.object(cut.agent, "reason") + mocker.patch.object(cut, "IO_check") + mocker.patch.object(cut.agent, "mission_status", MagicMock()) # never equals 'RED' + mocker.patch.object(cut.agent, "diagnose", return_value=fake_diagnosis) # Act result = cut.run_sim(fake_IO_Flag) @@ -137,15 +157,21 @@ def test_Simulator_run_sim_runs_until_has_more_is_false(mocker): assert cut.simData.get_next.call_args_list[i].args == () assert cut.agent.reason.call_count == num_fake_steps for i in range(num_fake_steps): - assert cut.agent.reason.call_args_list[i].args == (fake_next, ) + assert cut.agent.reason.call_args_list[i].args == (fake_next,) assert cut.IO_check.call_count == num_fake_steps for i in range(num_fake_steps): - assert cut.IO_check.call_args_list[i].args == (i, fake_IO_Flag, ) + assert cut.IO_check.call_args_list[i].args == ( + i, + fake_IO_Flag, + ) assert cut.agent.diagnose.call_count == 1 - assert cut.agent.diagnose.call_args_list[0].args == (num_fake_steps, ) + assert cut.agent.diagnose.call_args_list[0].args == (num_fake_steps,) assert result == fake_diagnosis -def test_Simulator_run_sim_diagnose_always_performed_when_fault_is_on_first_time_step(mocker): + +def test_Simulator_run_sim_diagnose_always_performed_when_fault_is_on_first_time_step( + mocker, +): # Arrange cut = Simulator.__new__(Simulator) cut.simData = MagicMock() @@ -155,46 +181,55 @@ def test_Simulator_run_sim_diagnose_always_performed_when_fault_is_on_first_time fake_next = MagicMock() fake_IO_Flag = MagicMock() - mocker.patch(sim.__name__ + '.print_sim_header') - mocker.patch(sim.__name__ + '.print_msg') - mocker.patch.object(cut.simData, 'has_more', side_effect=[True, False]) # single loop - mocker.patch.object(cut.simData, 'get_next', return_value=fake_next) - mocker.patch.object(cut.agent, 'reason') - mocker.patch.object(cut, 'IO_check') - mocker.patch.object(cut.agent, 'mission_status', 'RED') - mocker.patch.object(cut.agent, 'diagnose', return_value=fake_diagnosis) + mocker.patch(sim.__name__ + ".print_sim_header") + mocker.patch(sim.__name__ + ".print_msg") + mocker.patch.object( + cut.simData, "has_more", side_effect=[True, False] + ) # single loop + 
mocker.patch.object(cut.simData, "get_next", return_value=fake_next) + mocker.patch.object(cut.agent, "reason") + mocker.patch.object(cut, "IO_check") + mocker.patch.object(cut.agent, "mission_status", "RED") + mocker.patch.object(cut.agent, "diagnose", return_value=fake_diagnosis) # Act result = cut.run_sim(fake_IO_Flag) # Assert - assert cut.simData.get_next.call_count == 1 # verifies in loop - assert cut.agent.reason.call_count == 1 # verifies in loop - assert cut.IO_check.call_count == 1 # verifies in loop + assert cut.simData.get_next.call_count == 1 # verifies in loop + assert cut.agent.reason.call_count == 1 # verifies in loop + assert cut.IO_check.call_count == 1 # verifies in loop assert cut.agent.diagnose.call_count == 1 - assert cut.agent.diagnose.call_args_list[0].args == (0, ) - assert result == fake_diagnosis # check we ran through the method correctly + assert cut.agent.diagnose.call_args_list[0].args == (0,) + assert result == fake_diagnosis # check we ran through the method correctly + -def test_Simulator_run_sim_diagnose_is_not_performed_again_when_faults_are_consecutive_until_the_hundreth_step_after_last_diagnosis_and_returns_last_diagnosis(mocker): +def test_Simulator_run_sim_diagnose_is_not_performed_again_when_faults_are_consecutive_until_the_hundreth_step_after_last_diagnosis_and_returns_last_diagnosis( + mocker, +): # Arrange cut = Simulator.__new__(Simulator) cut.simData = MagicMock() cut.agent = MagicMock() - num_fake_steps = pytest.gen.randint(sim.DIAGNOSIS_INTERVAL, sim.DIAGNOSIS_INTERVAL * 10) # from interval to (10 * interval) arbitrary - fake_diagnoses = [MagicMock()] * (floor(num_fake_steps/sim.DIAGNOSIS_INTERVAL) + 1) # + 1 is for last diagnosis + num_fake_steps = pytest.gen.randint( + sim.DIAGNOSIS_INTERVAL, sim.DIAGNOSIS_INTERVAL * 10 + ) # from interval to (10 * interval) arbitrary + fake_diagnoses = [MagicMock()] * ( + floor(num_fake_steps / sim.DIAGNOSIS_INTERVAL) + 1 + ) # + 1 is for last diagnosis fake_next = MagicMock() fake_IO_Flag = MagicMock() side_effects_for_has_more = [True] * (num_fake_steps) + [False] - mocker.patch(sim.__name__ + '.print_sim_header') - mocker.patch(sim.__name__ + '.print_msg') - mocker.patch.object(cut.simData, 'has_more', side_effect=side_effects_for_has_more) - mocker.patch.object(cut.simData, 'get_next', return_value=fake_next) - mocker.patch.object(cut.agent, 'reason') - mocker.patch.object(cut, 'IO_check') - mocker.patch.object(cut.agent, 'mission_status', 'RED') - mocker.patch.object(cut.agent, 'diagnose', side_effect=fake_diagnoses) + mocker.patch(sim.__name__ + ".print_sim_header") + mocker.patch(sim.__name__ + ".print_msg") + mocker.patch.object(cut.simData, "has_more", side_effect=side_effects_for_has_more) + mocker.patch.object(cut.simData, "get_next", return_value=fake_next) + mocker.patch.object(cut.agent, "reason") + mocker.patch.object(cut, "IO_check") + mocker.patch.object(cut.agent, "mission_status", "RED") + mocker.patch.object(cut.agent, "diagnose", side_effect=fake_diagnoses) # Act result = cut.run_sim(fake_IO_Flag) @@ -205,23 +240,33 @@ def test_Simulator_run_sim_diagnose_is_not_performed_again_when_faults_are_conse assert cut.simData.get_next.call_args_list[i].args == () assert cut.agent.reason.call_count == num_fake_steps for i in range(num_fake_steps): - assert cut.agent.reason.call_args_list[i].args == (fake_next, ) + assert cut.agent.reason.call_args_list[i].args == (fake_next,) assert cut.IO_check.call_count == num_fake_steps for i in range(num_fake_steps): - assert 
cut.IO_check.call_args_list[i].args == (i, fake_IO_Flag, ) - assert cut.agent.diagnose.call_count == ceil(num_fake_steps/sim.DIAGNOSIS_INTERVAL) + assert cut.IO_check.call_args_list[i].args == ( + i, + fake_IO_Flag, + ) + assert cut.agent.diagnose.call_count == ceil( + num_fake_steps / sim.DIAGNOSIS_INTERVAL + ) for i in range(cut.agent.diagnose.call_count): - assert cut.agent.diagnose.call_args_list[i].args == (i * sim.DIAGNOSIS_INTERVAL, ) - assert result == fake_diagnoses[-1] # check we actually got the last diagnosis + assert cut.agent.diagnose.call_args_list[i].args == ( + i * sim.DIAGNOSIS_INTERVAL, + ) + assert result == fake_diagnoses[-1] # check we actually got the last diagnosis + # IO_check tests -def test_Simulator_IO_check_prints_sim_step_and_mission_status_when_given_IO_Flag_is_True(mocker): +def test_Simulator_IO_check_prints_sim_step_and_mission_status_when_given_IO_Flag_is_True( + mocker, +): # Arrange - arg_time_step = pytest.gen.randint(0, 100) # arbitrary from 0 to 100 + arg_time_step = pytest.gen.randint(0, 100) # arbitrary from 0 to 100 arg_IO_Flag = True - mocker.patch(sim.__name__ + '.print_sim_step') - mocker.patch(sim.__name__ + '.print_system_status') + mocker.patch(sim.__name__ + ".print_sim_step") + mocker.patch(sim.__name__ + ".print_system_status") cut = Simulator.__new__(Simulator) cut.agent = MagicMock() @@ -231,17 +276,21 @@ def test_Simulator_IO_check_prints_sim_step_and_mission_status_when_given_IO_Fla # Assert assert sim.print_sim_step.call_count == 1 - assert sim.print_sim_step.call_args_list[0].args == (arg_time_step + 1, ) + assert sim.print_sim_step.call_args_list[0].args == (arg_time_step + 1,) assert sim.print_system_status.call_count == 1 - assert sim.print_system_status.call_args_list[0].args == (cut.agent, cut.agent.vehicle_rep.curr_data, ) + assert sim.print_system_status.call_args_list[0].args == ( + cut.agent, + cut.agent.vehicle_rep.curr_data, + ) + def test_Simulator_IO_check_does_nothing_when_given_IO_Flag_is_not_True(mocker): # Arrange - arg_time_step = pytest.gen.randint(0, 100) # arbitrary from 0 to 100 + arg_time_step = pytest.gen.randint(0, 100) # arbitrary from 0 to 100 arg_IO_Flag = MagicMock() - mocker.patch(sim.__name__ + '.print_sim_step') - mocker.patch(sim.__name__ + '.print_system_status') + mocker.patch(sim.__name__ + ".print_sim_step") + mocker.patch(sim.__name__ + ".print_system_status") cut = Simulator.__new__(Simulator) diff --git a/test/onair/src/systems/test_status.py b/test/onair/src/systems/test_status.py index 9760feb6..802df048 100644 --- a/test/onair/src/systems/test_status.py +++ b/test/onair/src/systems/test_status.py @@ -13,29 +13,35 @@ from onair.src.systems.status import Status + # tests for init -def test_Status__init__with_empty_args_initializes_name_and_calls_set_status_with_default_values(mocker): +def test_Status__init__with_empty_args_initializes_name_and_calls_set_status_with_default_values( + mocker, +): # Arrange cut = Status.__new__(Status) - mocker.patch.object(cut, 'set_status') + mocker.patch.object(cut, "set_status") # Act cut.__init__() # Assert - assert cut.name == 'MISSION' + assert cut.name == "MISSION" assert cut.set_status.call_count == 1 - assert cut.set_status.call_args_list[0].args == ('---', -1.0) + assert cut.set_status.call_args_list[0].args == ("---", -1.0) + -def test_Status__init__with_valid_args_initializes_name_and_calls_set_status_with_expected_values(mocker): +def test_Status__init__with_valid_args_initializes_name_and_calls_set_status_with_expected_values( + mocker, +): # 
Arrange cut = Status.__new__(Status) arg_name = MagicMock() arg_status = MagicMock() arg_bayesian_conf = MagicMock() - mocker.patch.object(cut, 'set_status') + mocker.patch.object(cut, "set_status") # Act cut.__init__(arg_name, arg_status, arg_bayesian_conf) @@ -45,13 +51,16 @@ def test_Status__init__with_valid_args_initializes_name_and_calls_set_status_wit assert cut.set_status.call_count == 1 assert cut.set_status.call_args_list[0].args == (arg_status, arg_bayesian_conf) + # tests for set status def test_Status_set_status_when_both_args_are_provided_and_valid_sets_variables_to_expected_values(): # Arrange - rand_index = pytest.gen.randint(0, 3) # index, from 0 to 3 - valid_statuses = ['---', 'RED', 'YELLOW', 'GREEN'] + rand_index = pytest.gen.randint(0, 3) # index, from 0 to 3 + valid_statuses = ["---", "RED", "YELLOW", "GREEN"] arg_status = valid_statuses[rand_index] - arg_bayesian_conf = pytest.gen.uniform(-1.0, 1.0) # float in accepted range from -1.0 to 1.0 + arg_bayesian_conf = pytest.gen.uniform( + -1.0, 1.0 + ) # float in accepted range from -1.0 to 1.0 cut = Status.__new__(Status) @@ -62,9 +71,10 @@ def test_Status_set_status_when_both_args_are_provided_and_valid_sets_variables_ assert cut.bayesian_conf == arg_bayesian_conf assert cut.status == arg_status + def test_Status_set_status_when_arg_status_is_valid_and_arg_conf_is_1_sets_variables_to_expected_values(): # Arrange - arg_status = '---' + arg_status = "---" arg_bayesian_conf = 1.0 cut = Status.__new__(Status) @@ -76,9 +86,10 @@ def test_Status_set_status_when_arg_status_is_valid_and_arg_conf_is_1_sets_varia assert cut.bayesian_conf == arg_bayesian_conf assert cut.status == arg_status + def test_Status_set_status_when_arg_status_is_valid_and_arg_conf_is_negative_1_sets_variables_to_expected_values(): # Arrange - arg_status = '---' + arg_status = "---" arg_bayesian_conf = -1.0 cut = Status.__new__(Status) @@ -90,9 +101,10 @@ def test_Status_set_status_when_arg_status_is_valid_and_arg_conf_is_negative_1_s assert cut.bayesian_conf == arg_bayesian_conf assert cut.status == arg_status + def test_Status_set_status_when_only_stat_arg_is_provided_sets_variables_to_expected_values(): # Arrange - arg_status = '---' + arg_status = "---" cut = Status.__new__(Status) @@ -103,33 +115,40 @@ def test_Status_set_status_when_only_stat_arg_is_provided_sets_variables_to_expe assert cut.bayesian_conf == -1.0 assert cut.status == arg_status + def test_Status_set_status_raises_error_because_bayesian_conf_greater_than_1(): # Arrange cut = Status.__new__(Status) - arg_status = '---' - arg_bayesian_conf = pytest.gen.uniform(1.01, 10.0) # arbitrary float greater than 1.0 (top of accepted range) + arg_status = "---" + arg_bayesian_conf = pytest.gen.uniform( + 1.01, 10.0 + ) # arbitrary float greater than 1.0 (top of accepted range) # Act with pytest.raises(AssertionError) as e_info: cut.set_status(arg_status, arg_bayesian_conf) # Assert - assert e_info.match('') + assert e_info.match("") + def test_Status_set_status_raises_error_because_bayesian_conf_less_than_neg_1(): # Arrange cut = Status.__new__(Status) - arg_status = '---' - arg_bayesian_conf = pytest.gen.uniform(-10.0, -1.01) # arbitrary float less than -1.0 (bottom of accepted range) + arg_status = "---" + arg_bayesian_conf = pytest.gen.uniform( + -10.0, -1.01 + ) # arbitrary float less than -1.0 (bottom of accepted range) # Act with pytest.raises(AssertionError) as e_info: cut.set_status(arg_status, arg_bayesian_conf) # Assert - assert e_info.match('') + assert e_info.match("") + def 
test_Status_set_status_raises_error_because_invalid_status_arg():
     # Arrange
@@ -142,7 +161,8 @@ def test_Status_set_status_raises_error_because_invalid_status_arg():
         cut.set_status(arg_status, arg_bayesian_conf)
 
     # Assert
-    assert e_info.match('')
+    assert e_info.match("")
+
 
 # tests for get_status
 def test_Status_get_status_returns_expected_values():
@@ -157,6 +177,7 @@ def test_Status_get_status_returns_expected_values():
     # Assert
     assert result == fake_status
 
+
 # tests for get_bayesian_status
 def test_Status_get_bayesian_status_returns_expected_values():
     # Arrange
@@ -174,6 +195,7 @@ def test_Status_get_bayesian_status_returns_expected_values():
     assert result_status == fake_status
     assert result_bayesian_conf == fake_bayesian_conf
 
+
 # tests for get_name
 def test_Status_get_name_returns_expected_value():
     # Arrange
diff --git a/test/onair/src/systems/test_telemetry_test_suite.py b/test/onair/src/systems/test_telemetry_test_suite.py
index 4638b312..f9c6fce4 100644
--- a/test/onair/src/systems/test_telemetry_test_suite.py
+++ b/test/onair/src/systems/test_telemetry_test_suite.py
@@ -14,8 +14,11 @@
 import onair.src.systems.telemetry_test_suite as telemetry_test_suite
 from onair.src.systems.telemetry_test_suite import TelemetryTestSuite
 
+
 # __init__ tests
-def test_TelemetryTestSuite__init__sets_the_expected_values_with_given_headers_and_tests(mocker):
+def test_TelemetryTestSuite__init__sets_the_expected_values_with_given_headers_and_tests(
+    mocker,
+):
     # Arrange
     arg_headers = MagicMock()
     arg_tests = MagicMock()
@@ -29,10 +32,15 @@ def test_TelemetryTestSuite__init__sets_the_expected_values_with_given_headers_a
     assert cut.dataFields == arg_headers
     assert cut.tests == arg_tests
     assert cut.latest_results == None
-    assert cut.epsilon == 1/100000 # production codes notes this value as needing intelligent definition
-    assert cut.all_tests == {'STATE' : cut.state,
-                            'FEASIBILITY' : cut.feasibility,
-                            'NOOP' : cut.noop}
+    assert (
+        cut.epsilon == 1 / 100000
+    )  # production code notes this value as needing intelligent definition
+    assert cut.all_tests == {
+        "STATE": cut.state,
+        "FEASIBILITY": cut.feasibility,
+        "NOOP": cut.noop,
+    }
+
 
 def test_TelemetryTestSuite__init__default_arg_tests_is_empty_list(mocker):
     # Arrange
@@ -46,6 +54,7 @@ def test_TelemetryTestSuite__init__default_arg_tests_is_empty_list(mocker):
     # Assert
     assert cut.tests == []
 
+
 def test_TelemetryTestSuite__init__default_arg_headers_is_empty_list(mocker):
     # Arrange
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
@@ -56,10 +65,13 @@ def test_TelemetryTestSuite__init__default_arg_headers_is_empty_list(mocker):
     # Assert
     assert cut.dataFields == []
 
+
 # execute_suite tests
-def test_TelemetryTestSuite_execute_suite_sets_the_latest_results_to_empty_list_when_updated_frame_len_is_0(mocker):
+def test_TelemetryTestSuite_execute_suite_sets_the_latest_results_to_empty_list_when_updated_frame_len_is_0(
+    mocker,
+):
     # Arrange
-    arg_update_frame = '' # empty string for len of 0
+    arg_update_frame = ""  # empty string for len of 0
     arg_sync_data = MagicMock()
 
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
@@ -70,12 +82,15 @@ def test_TelemetryTestSuite_execute_suite_sets_the_latest_results_to_empty_list_
     # Assert
     assert cut.latest_results == []
 
-def test_TelemetryTestSuite_execute_suite_sets_latests_results_to_list_of_run_tests_for_each_item_in_given_updated_frame(mocker):
+
+def test_TelemetryTestSuite_execute_suite_sets_latests_results_to_list_of_run_tests_for_each_item_in_given_updated_frame(
+    mocker,
+):
     # Arrange
     arg_update_frame
= [] arg_sync_data = MagicMock() - num_items_in_update = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 + num_items_in_update = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 expected_results = [] for i in range(num_items_in_update): @@ -84,7 +99,7 @@ def test_TelemetryTestSuite_execute_suite_sets_latests_results_to_list_of_run_te cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch.object(cut, 'run_tests', side_effect=expected_results) + mocker.patch.object(cut, "run_tests", side_effect=expected_results) # Act cut.execute_suite(arg_update_frame, arg_sync_data) @@ -92,16 +107,21 @@ def test_TelemetryTestSuite_execute_suite_sets_latests_results_to_list_of_run_te # Assert assert cut.run_tests.call_count == num_items_in_update for i in range(num_items_in_update): - assert cut.run_tests.call_args_list[i].args == (i, arg_update_frame[i], arg_sync_data, ) + assert cut.run_tests.call_args_list[i].args == ( + i, + arg_update_frame[i], + arg_sync_data, + ) assert cut.latest_results == expected_results + def test_TelemetryTestSuite_execute_suite_default_arg_sync_data_is_empty_map(mocker): # Arrange arg_update_frame = [MagicMock()] cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch.object(cut, 'run_tests', return_value=86) # arbitrary 86 + mocker.patch.object(cut, "run_tests", return_value=86) # arbitrary 86 # Act cut.execute_suite(arg_update_frame) @@ -109,88 +129,117 @@ def test_TelemetryTestSuite_execute_suite_default_arg_sync_data_is_empty_map(moc # Assert assert cut.run_tests.call_args_list[0].args == (0, arg_update_frame[0], {}) + # run_tests tests -def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_but_does_not_append_to_status_when_given_header_index_leads_to_empty_tests(mocker): +def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_but_does_not_append_to_status_when_given_header_index_leads_to_empty_tests( + mocker, +): # Arrange arg_header_index = MagicMock() arg_test_val = MagicMock() arg_sync_data = MagicMock() - fake_bayesian = [MagicMock(),MagicMock()] + fake_bayesian = [MagicMock(), MagicMock()] expected_datafield = MagicMock() expected_result = telemetry_test_suite.Status.__new__(telemetry_test_suite.Status) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - cut.tests = {arg_header_index:[]} - cut.dataFields = {arg_header_index:expected_datafield} + cut.tests = {arg_header_index: []} + cut.dataFields = {arg_header_index: expected_datafield} - mocker.patch.object(cut, 'calc_single_status', return_value=fake_bayesian) - mocker.patch(telemetry_test_suite.__name__ + '.Status', return_value = expected_result) + mocker.patch.object(cut, "calc_single_status", return_value=fake_bayesian) + mocker.patch( + telemetry_test_suite.__name__ + ".Status", return_value=expected_result + ) # Act result = cut.run_tests(arg_header_index, arg_test_val, arg_sync_data) # Assert assert cut.calc_single_status.call_count == 1 - assert cut.calc_single_status.call_args_list[0].args == ([], ) + assert cut.calc_single_status.call_args_list[0].args == ([],) assert telemetry_test_suite.Status.call_count == 1 - assert telemetry_test_suite.Status.call_args_list[0].args == (expected_datafield, fake_bayesian[0], fake_bayesian[1]) + assert telemetry_test_suite.Status.call_args_list[0].args == ( + expected_datafield, + fake_bayesian[0], + fake_bayesian[1], + ) assert result == expected_result -def 
test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_appends_status_when_given_header_index_leads_to_a_single_test_not_named_SYNC(mocker):
+
+def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_appends_status_when_given_header_index_leads_to_a_single_test_not_named_SYNC(
+    mocker,
+):
     # Arrange
     arg_header_index = MagicMock()
     arg_test_val = MagicMock()
     arg_sync_data = MagicMock()
 
     fake_tests = [[str(MagicMock())]]
-    for i in range(pytest.gen.randint(0,5)): # arbirary, from 0 to 5 test data points
+    for i in range(pytest.gen.randint(0, 5)):  # arbitrary, from 0 to 5 test data points
         fake_tests[0].append(MagicMock())
     fake_stat = MagicMock()
     fake_mass_assigments = MagicMock()
-    fake_bayesian = [MagicMock(),MagicMock()]
+    fake_bayesian = [MagicMock(), MagicMock()]
     expected_datafield = MagicMock()
     expected_result = telemetry_test_suite.Status.__new__(telemetry_test_suite.Status)
 
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
-    cut.tests = {arg_header_index:fake_tests}
-    cut.dataFields = {arg_header_index:expected_datafield}
+    cut.tests = {arg_header_index: fake_tests}
+    cut.dataFields = {arg_header_index: expected_datafield}
     cut.epsilon = MagicMock()
 
     # IMPORTANT: note, using state function as an easy mock -- not really calling it here!!
-    mocker.patch.object(cut, 'state', return_value=(fake_stat, fake_mass_assigments))
-    mocker.patch.object(cut, 'calc_single_status', return_value=fake_bayesian)
-    mocker.patch(telemetry_test_suite.__name__ + '.Status', return_value = expected_result)
+    mocker.patch.object(cut, "state", return_value=(fake_stat, fake_mass_assigments))
+    mocker.patch.object(cut, "calc_single_status", return_value=fake_bayesian)
+    mocker.patch(
+        telemetry_test_suite.__name__ + ".Status", return_value=expected_result
+    )
 
-    cut.all_tests = {fake_tests[0][0]:cut.state} # IMPORTANT: purposely set AFTER patch of cut's sync function
+    cut.all_tests = {
+        fake_tests[0][0]: cut.state
+    }  # IMPORTANT: purposely set AFTER patch of cut's sync function
 
     # Act
     result = cut.run_tests(arg_header_index, arg_test_val, arg_sync_data)
 
     # Assert
     assert cut.state.call_count == 1
-    assert cut.state.call_args_list[0].args == (arg_test_val, fake_tests[0][1:], cut.epsilon)
+    assert cut.state.call_args_list[0].args == (
+        arg_test_val,
+        fake_tests[0][1:],
+        cut.epsilon,
+    )
     assert cut.calc_single_status.call_count == 1
-    assert cut.calc_single_status.call_args_list[0].args == ([fake_stat], )
+    assert cut.calc_single_status.call_args_list[0].args == ([fake_stat],)
     assert telemetry_test_suite.Status.call_count == 1
-    assert telemetry_test_suite.Status.call_args_list[0].args == (expected_datafield, fake_bayesian[0], fake_bayesian[1])
+    assert telemetry_test_suite.Status.call_args_list[0].args == (
+        expected_datafield,
+        fake_bayesian[0],
+        fake_bayesian[1],
+    )
     assert result == expected_result
 
-def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_appends_status_with_any_updates_where_vars_in_sync_data_keys_when_given_header_index_leads_to_multiple_tests(mocker):
+
+def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_header_index_appends_status_with_any_updates_where_vars_in_sync_data_keys_when_given_header_index_leads_to_multiple_tests(
+    mocker,
+):
     # Arrange
     arg_header_index = MagicMock()
     arg_test_val = MagicMock()
     arg_sync_data = {}
 
-    num_fake_tests = pytest.gen.randint(1, 5) # arbitrary, from 1 to 5 tests (0 has own test)
+    num_fake_tests = pytest.gen.randint(
+        1, 5
+    )  # 
arbitrary, from 1 to 5 tests (0 has own test) fake_tests = [] fake_vars = [] fake_sync_vars = [] fake_stat = MagicMock() fake_mass_assigments = MagicMock() - fake_bayesian = [MagicMock(),MagicMock()] + fake_bayesian = [MagicMock(), MagicMock()] expected_datafield = MagicMock() expected_result = telemetry_test_suite.Status.__new__(telemetry_test_suite.Status) @@ -199,15 +248,19 @@ def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_head expected_stats.append(fake_stat) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - cut.tests = {arg_header_index:fake_tests} - cut.dataFields = {arg_header_index:expected_datafield} + cut.tests = {arg_header_index: fake_tests} + cut.dataFields = {arg_header_index: expected_datafield} cut.epsilon = MagicMock() - mocker.patch.object(cut, 'state', return_value=(fake_stat, fake_mass_assigments)) - mocker.patch.object(cut, 'calc_single_status', return_value=fake_bayesian) - mocker.patch(telemetry_test_suite.__name__ + '.Status', return_value = expected_result) + mocker.patch.object(cut, "state", return_value=(fake_stat, fake_mass_assigments)) + mocker.patch.object(cut, "calc_single_status", return_value=fake_bayesian) + mocker.patch( + telemetry_test_suite.__name__ + ".Status", return_value=expected_result + ) - cut.all_tests = {'STATE':cut.state} # IMPORTANT: purposely set AFTER patch of cut's state function + cut.all_tests = { + "STATE": cut.state + } # IMPORTANT: purposely set AFTER patch of cut's state function # setup random input and results for i in range(num_fake_tests): @@ -222,13 +275,22 @@ def test_TelemetryTestSuite_run_tests_return_Status_object_based_upon_given_head # Assert assert cut.state.call_count == num_fake_tests for i in range(num_fake_tests): - assert cut.state.call_args_list[i].args == (arg_test_val, fake_sync_vars[i], cut.epsilon) + assert cut.state.call_args_list[i].args == ( + arg_test_val, + fake_sync_vars[i], + cut.epsilon, + ) assert cut.calc_single_status.call_count == 1 - assert cut.calc_single_status.call_args_list[0].args == (expected_stats, ) + assert cut.calc_single_status.call_args_list[0].args == (expected_stats,) assert telemetry_test_suite.Status.call_count == 1 - assert telemetry_test_suite.Status.call_args_list[0].args == (expected_datafield, fake_bayesian[0], fake_bayesian[1]) + assert telemetry_test_suite.Status.call_args_list[0].args == ( + expected_datafield, + fake_bayesian[0], + fake_bayesian[1], + ) assert result == expected_result + # get_latest_result tests def test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_is_None(): # Arrange @@ -243,7 +305,10 @@ def test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_ # Assert assert result == None -def test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_is_filled(mocker): + +def test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_is_filled( + mocker, +): # Arrange arg_field_name = MagicMock() @@ -252,10 +317,10 @@ def test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_ expected_result = MagicMock() cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - cut.latest_results = {fake_hdr_index:expected_result} + cut.latest_results = {fake_hdr_index: expected_result} cut.dataFields = MagicMock() - mocker.patch.object(cut.dataFields, 'index', return_value=fake_hdr_index) + mocker.patch.object(cut.dataFields, "index", return_value=fake_hdr_index) # Act result = cut.get_latest_result(arg_field_name) @@ -263,6 +328,7 @@ def 
test_TelemetryTestSuite_get_latest_results_returns_None_when_latest_results_
     # Assert
     assert result == expected_result
 
+
 # state tests
 def test_TelemetryTestSuite_state_returns_tuple_of_str_GREEN_and_list_containing_tuple_of_set_of_str_GREEN_and_1_pt_0_when_int_val_is_in_range_test_params_0():
     # Arrange
@@ -270,21 +336,29 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_GREEN_and_list_containing
     arg_epsilon = MagicMock()
 
     factor = 1
-    if pytest.gen.randint(0,1) == 1:
+    if pytest.gen.randint(0, 1) == 1:
         factor *= -1
-    # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_mid_point = pytest.gen.randint(0, 200) * factor # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_green_tol = pytest.gen.randint(1, 50) # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
-
-    arg_test_params.append(range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol)))
+    # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_mid_point = (
+        pytest.gen.randint(0, 200) * factor
+    )  # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_green_tol = pytest.gen.randint(
+        1, 50
+    )  # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
+
+    arg_test_params.append(
+        range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol))
+    )
     arg_test_params.append(MagicMock())
     arg_test_params.append(MagicMock())
 
-    arg_val = pytest.gen.randint(0 - fake_green_tol, fake_green_tol - 1) + fake_mid_point # random val within green range
+    arg_val = (
+        pytest.gen.randint(0 - fake_green_tol, fake_green_tol - 1) + fake_mid_point
+    )  # random val within green range
     if arg_val > 0:
-        arg_val += pytest.gen.random() # make float by adding some random decimal
+        arg_val += pytest.gen.random()  # make float by adding some random decimal
     else:
-        arg_val -= pytest.gen.random() # make float by adding some random decimal
+        arg_val -= pytest.gen.random()  # make float by adding some random decimal
 
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
 
@@ -292,7 +366,8 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_GREEN_and_list_containing
     result = cut.state(arg_val, arg_test_params, arg_epsilon)
 
     # Assert
-    assert result == ('GREEN', [({'GREEN'}, 1.0)])
+    assert result == ("GREEN", [({"GREEN"}, 1.0)])
+
 
 def test_TelemetryTestSuite_state_returns_tuple_of_str_YELLOW_and_list_containing_tuple_of_set_of_str_YELLOW_and_1_pt_0_when_int_val_is_in_range_test_params_1_and_not_in_0():
     # Arrange
@@ -300,25 +375,40 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_YELLOW_and_list_containin
     arg_epsilon = MagicMock()
 
     factor = 1
-    if pytest.gen.randint(0,1) == 1:
+    if pytest.gen.randint(0, 1) == 1:
         factor *= -1
-    # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_mid_point = pytest.gen.randint(0, 200) * factor # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_green_tol = pytest.gen.randint(1, 50) # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
-    fake_yellow_tol = pytest.gen.randint(1, 20) + fake_green_tol # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol
-
-    arg_test_params.append(range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol)))
-    arg_test_params.append(range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol)))
+    # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_mid_point = (
+        pytest.gen.randint(0, 200) * factor
+    )  # arbitrary, from 0 to 200 with 50/50 chance of negative
+    
fake_green_tol = pytest.gen.randint(
+        1, 50
+    )  # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
+    fake_yellow_tol = (
+        pytest.gen.randint(1, 20) + fake_green_tol
+    )  # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol
+
+    arg_test_params.append(
+        range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol))
+    )
+    arg_test_params.append(
+        range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol))
+    )
     arg_test_params.append(MagicMock())
 
-    if pytest.gen.randint(0,1) == 1:
-        arg_val = pytest.gen.randint(fake_green_tol, fake_yellow_tol - 1) + fake_mid_point # random val within upper yellow range
+    if pytest.gen.randint(0, 1) == 1:
+        arg_val = (
+            pytest.gen.randint(fake_green_tol, fake_yellow_tol - 1) + fake_mid_point
+        )  # random val within upper yellow range
     else:
-        arg_val = pytest.gen.randint(0 - fake_yellow_tol, 0 - fake_green_tol - 1) + fake_mid_point # sometimes flip to lower yellow range
+        arg_val = (
+            pytest.gen.randint(0 - fake_yellow_tol, 0 - fake_green_tol - 1)
+            + fake_mid_point
+        )  # sometimes flip to lower yellow range
     if arg_val > 0:
-        arg_val += pytest.gen.random() # make float by adding some random decimal
+        arg_val += pytest.gen.random()  # make float by adding some random decimal
     else:
-        arg_val -= pytest.gen.random() # make float by adding some random decimal
+        arg_val -= pytest.gen.random()  # make float by adding some random decimal
 
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
 
@@ -326,7 +416,8 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_YELLOW_and_list_containin
     result = cut.state(arg_val, arg_test_params, arg_epsilon)
 
     # Assert
-    assert result == ('YELLOW', [({'YELLOW'}, 1.0)])
+    assert result == ("YELLOW", [({"YELLOW"}, 1.0)])
+
 
 def test_TelemetryTestSuite_state_returns_tuple_of_str_RED_and_list_containing_tuple_of_set_of_str_RED_and_1_pt_0_when_int_val_is_in_range_test_params_2_and_not_in_0_or_1():
     # Arrange
@@ -334,26 +425,45 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_RED_and_list_containing_t
     arg_epsilon = MagicMock()
 
     factor = 1
-    if pytest.gen.randint(0,1) == 1:
+    if pytest.gen.randint(0, 1) == 1:
         factor *= -1
-    # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_mid_point = pytest.gen.randint(0, 200) * factor # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_green_tol = pytest.gen.randint(1, 50) # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
-    fake_yellow_tol = pytest.gen.randint(1, 20) + fake_green_tol # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol
-    fake_red_tol = pytest.gen.randint(1, 10) + fake_yellow_tol # arbitrary, from 1 to 10 allowance in both directions from fake_mid_point + fake_yellow_tol
-
-    arg_test_params.append(range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol)))
-    arg_test_params.append(range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol)))
-    arg_test_params.append(range((fake_mid_point - fake_red_tol), (fake_mid_point + fake_red_tol)))
-
-    if pytest.gen.randint(0,1) == 1:
-        arg_val = pytest.gen.randint(fake_yellow_tol, fake_red_tol - 1) + fake_mid_point # random val within upper red range
+    # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_mid_point = (
+        pytest.gen.randint(0, 200) * factor
+    )  # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_green_tol = pytest.gen.randint(
+        1, 50
+    )  # arbitrary, from 1 to 50 allowance in both 
directions from fake_mid_point
+    fake_yellow_tol = (
+        pytest.gen.randint(1, 20) + fake_green_tol
+    )  # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol
+    fake_red_tol = (
+        pytest.gen.randint(1, 10) + fake_yellow_tol
+    )  # arbitrary, from 1 to 10 allowance in both directions from fake_mid_point + fake_yellow_tol
+
+    arg_test_params.append(
+        range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol))
+    )
+    arg_test_params.append(
+        range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol))
+    )
+    arg_test_params.append(
+        range((fake_mid_point - fake_red_tol), (fake_mid_point + fake_red_tol))
+    )
+
+    if pytest.gen.randint(0, 1) == 1:
+        arg_val = (
+            pytest.gen.randint(fake_yellow_tol, fake_red_tol - 1) + fake_mid_point
+        )  # random val within upper red range
     else:
-        arg_val = pytest.gen.randint(0 - fake_red_tol, 0 - fake_yellow_tol - 1) + fake_mid_point # sometimes flip to lower red range
+        arg_val = (
+            pytest.gen.randint(0 - fake_red_tol, 0 - fake_yellow_tol - 1)
+            + fake_mid_point
+        )  # sometimes flip to lower red range
     if arg_val > 0:
-        arg_val += pytest.gen.random() # make float by adding some random decimal
+        arg_val += pytest.gen.random()  # make float by adding some random decimal
     else:
-        arg_val -= pytest.gen.random() # make float by adding some random decimal
+        arg_val -= pytest.gen.random()  # make float by adding some random decimal
 
     cut = TelemetryTestSuite.__new__(TelemetryTestSuite)
 
@@ -361,7 +471,8 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_RED_and_list_containing_t
     result = cut.state(arg_val, arg_test_params, arg_epsilon)
 
     # Assert
-    assert result == ('RED', [({'RED'}, 1.0)])
+    assert result == ("RED", [({"RED"}, 1.0)])
+
 
 def test_TelemetryTestSuite_state_returns_tuple_of_str_3_dashes_and_list_containing_tuple_of_set_of_str_RED_YELLOW_and_GREEN_and_1_pt_0_when_int_val_is_in_not_in_any_range():
     # Arrange
@@ -369,26 +480,40 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_3_dashes_and_list_contain
     arg_epsilon = MagicMock()
 
     factor = 1
-    if pytest.gen.randint(0,1) == 1:
+    if pytest.gen.randint(0, 1) == 1:
        factor *= -1
-    # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_mid_point = pytest.gen.randint(0, 200) * factor # arbitrary, from 0 to 200 with 50/50 change of negative
-    fake_green_tol = pytest.gen.randint(1, 50) # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
-    fake_yellow_tol = pytest.gen.randint(1, 20) + fake_green_tol # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol
-    fake_red_tol = pytest.gen.randint(1, 10) + fake_yellow_tol # arbitrary, from 1 to 10 allowance in both directions from fake_mid_point + fake_yellow_tol
-
-    arg_test_params.append(range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol)))
-    arg_test_params.append(range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol)))
-    arg_test_params.append(range((fake_mid_point - fake_red_tol), (fake_mid_point + fake_red_tol)))
-
-    if pytest.gen.randint(0,1) == 1:
-        arg_val = fake_red_tol + fake_mid_point + 1 # random val outside upper red
+    # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_mid_point = (
+        pytest.gen.randint(0, 200) * factor
+    )  # arbitrary, from 0 to 200 with 50/50 chance of negative
+    fake_green_tol = pytest.gen.randint(
+        1, 50
+    )  # arbitrary, from 1 to 50 allowance in both directions from fake_mid_point
+    fake_yellow_tol = (
+        pytest.gen.randint(1, 20) + fake_green_tol
+ ) # arbitrary, from 1 to 20 allowance in both directions from fake_mid_point + fake_green_tol + fake_red_tol = ( + pytest.gen.randint(1, 10) + fake_yellow_tol + ) # arbitrary, from 1 to 10 allowance in both directions from fake_mid_point + fake_yellow_tol + + arg_test_params.append( + range((fake_mid_point - fake_green_tol), (fake_mid_point + fake_green_tol)) + ) + arg_test_params.append( + range((fake_mid_point - fake_yellow_tol), (fake_mid_point + fake_yellow_tol)) + ) + arg_test_params.append( + range((fake_mid_point - fake_red_tol), (fake_mid_point + fake_red_tol)) + ) + + if pytest.gen.randint(0, 1) == 1: + arg_val = fake_red_tol + fake_mid_point + 1 # random val outside upper red else: - arg_val = 0 - fake_red_tol + fake_mid_point - 1 # random val outside lower red + arg_val = 0 - fake_red_tol + fake_mid_point - 1 # random val outside lower red if arg_val > 0: - arg_val += pytest.gen.random() # make float by adding some random decimal + arg_val += pytest.gen.random() # make float by adding some random decimal else: - arg_val -= pytest.gen.random() # make float by adding some random decimal + arg_val -= pytest.gen.random() # make float by adding some random decimal cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -396,16 +521,19 @@ def test_TelemetryTestSuite_state_returns_tuple_of_str_3_dashes_and_list_contain result = cut.state(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('---', [({'RED', 'YELLOW', 'GREEN'}, 1.0)]) + assert result == ("---", [({"RED", "YELLOW", "GREEN"}, 1.0)]) + # feasibility tests -def test_TelemetryTestSuite_feasibility_asserts_len_given_test_params_is_not_2_or_4(mocker): +def test_TelemetryTestSuite_feasibility_asserts_len_given_test_params_is_not_2_or_4( + mocker, +): # Arrange arg_val = MagicMock() arg_test_params = [] arg_epsilon = MagicMock() - num_test_params = pytest.gen.sample([1,3,5], 1) + num_test_params = pytest.gen.sample([1, 3, 5], 1) for i in range(num_test_params[0]): arg_test_params.append(MagicMock()) @@ -417,14 +545,19 @@ def test_TelemetryTestSuite_feasibility_asserts_len_given_test_params_is_not_2_o result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert e_info.match('') + assert e_info.match("") -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_val_eq_to_lowest_bound_when_given_test_params_length_2(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_val_eq_to_lowest_bound_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = MagicMock() - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound arg_test_params = [fake_lowest_bound, fake_highest_bound] arg_val = fake_lowest_bound @@ -435,19 +568,26 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED', 'GREEN'}, 1.0)]) + assert result == ("RED", [({"RED", "GREEN"}, 1.0)]) + -def 
test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_eq_to_lowest_bound_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_eq_to_lowest_bound_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = MagicMock() - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 1, - fake_highest_bound - 1, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 1, + fake_highest_bound - 1, + fake_highest_bound, + ] arg_val = fake_lowest_bound @@ -457,20 +597,24 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED', 'YELLOW'}, 1.0)]) + assert result == ("RED", [({"RED", "YELLOW"}, 1.0)]) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_2(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] - arg_val = fake_lowest_bound - fake_delta - 1 # -1 for less than low minus delta + arg_val = fake_lowest_bound - fake_delta - 1 # -1 for less than low minus delta cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -478,20 +622,24 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, 1.0)]) + assert result == ("RED", [({"RED"}, 1.0)]) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_4(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound 
= fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] - arg_val = fake_lowest_bound - fake_delta - 1 # -1 for less than low minus delta + arg_val = fake_lowest_bound - fake_delta - 1 # -1 for less than low minus delta cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -499,21 +647,25 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, 1.0)]) + assert result == ("RED", [({"RED"}, 1.0)]) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_2(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] arg_val = fake_lowest_bound - 1 - expected_mass = abs(fake_lowest_bound - arg_val)/fake_delta + expected_mass = abs(fake_lowest_bound - arg_val) / fake_delta expected_red_yellow_mass = 1.0 - expected_mass cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -522,23 +674,33 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, expected_mass), ({'GREEN', 'RED'}, expected_red_yellow_mass)]) + assert result == ( + "RED", + [({"RED"}, expected_mass), ({"GREEN", "RED"}, expected_red_yellow_mass)], + ) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_4(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_lower_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_highest_bound - 2) - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 2, - fake_highest_bound - 2, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 2, + fake_highest_bound - 2, + 
fake_highest_bound, + ] arg_val = fake_lowest_bound - 1 - expected_mass = abs(fake_lowest_bound - arg_val)/fake_delta + expected_mass = abs(fake_lowest_bound - arg_val) / fake_delta expected_red_yellow_mass = 1.0 - expected_mass cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -547,14 +709,22 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, expected_mass), ({'YELLOW', 'RED'}, expected_red_yellow_mass)]) + assert result == ( + "RED", + [({"RED"}, expected_mass), ({"YELLOW", "RED"}, expected_red_yellow_mass)], + ) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_val_eq_to_lowest_bound_when_given_test_params_length_2(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_val_eq_to_lowest_bound_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = MagicMock() - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound arg_test_params = [fake_lowest_bound, fake_highest_bound] arg_val = fake_highest_bound @@ -565,19 +735,26 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED', 'GREEN'}, 1.0)]) + assert result == ("RED", [({"RED", "GREEN"}, 1.0)]) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_eq_to_lowest_bound_when_given_test_params_length_4(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_eq_to_lowest_bound_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = MagicMock() - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 1, - fake_highest_bound - 1, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 1, + fake_highest_bound - 1, + fake_highest_bound, + ] arg_val = fake_highest_bound @@ -587,20 +764,24 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED', 'YELLOW'}, 1.0)]) + assert result == ("RED", [({"RED", "YELLOW"}, 1.0)]) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_2(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_2( + mocker, 
+): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] - arg_val = fake_highest_bound + fake_delta + 1 # +1 for more than high plus delta + arg_val = fake_highest_bound + fake_delta + 1 # +1 for more than high plus delta cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -608,20 +789,24 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, 1.0)]) + assert result == ("RED", [({"RED"}, 1.0)]) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_less_than_low_range_minus_delta_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] - arg_val = fake_highest_bound + fake_delta + 1 # +1 for more than high plus delta + arg_val = fake_highest_bound + fake_delta + 1 # +1 for more than high plus delta cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -629,21 +814,25 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, 1.0)]) + assert result == ("RED", [({"RED"}, 1.0)]) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_2(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_lowest_bound) - arg_test_params = [fake_lowest_bound, - 
fake_highest_bound] + arg_test_params = [fake_lowest_bound, fake_highest_bound] arg_val = fake_highest_bound + 1 - expected_mass = abs(fake_highest_bound - arg_val)/fake_delta + expected_mass = abs(fake_highest_bound - arg_val) / fake_delta expected_red_yellow_mass = 1.0 - expected_mass cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -652,23 +841,33 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, expected_mass), ({'RED', 'GREEN'}, expected_red_yellow_mass)]) + assert result == ( + "RED", + [({"RED"}, expected_mass), ({"RED", "GREEN"}, expected_red_yellow_mass)], + ) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_high_boundry_given_val_within_low_range_minus_delta_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound fake_delta = arg_epsilon * abs(fake_highest_bound - fake_highest_bound - 2) - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 2, - fake_highest_bound - 2, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 2, + fake_highest_bound - 2, + fake_highest_bound, + ] arg_val = fake_highest_bound + 1 - expected_mass = abs(fake_highest_bound - arg_val)/fake_delta + expected_mass = abs(fake_highest_bound - arg_val) / fake_delta expected_red_yellow_mass = 1.0 - expected_mass cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -677,20 +876,30 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('RED', [({'RED'}, expected_mass), ({'YELLOW', 'RED'}, expected_red_yellow_mass)]) + assert result == ( + "RED", + [({"RED"}, expected_mass), ({"YELLOW", "RED"}, expected_red_yellow_mass)], + ) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_green_zone_when_given_test_params_length_2(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_green_zone_when_given_test_params_length_2( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound arg_test_params = [fake_lowest_bound, fake_highest_bound] - arg_val = int(fake_highest_bound - (abs(fake_highest_bound - fake_lowest_bound) / 2)) + arg_val = int( + fake_highest_bound - (abs(fake_highest_bound - fake_lowest_bound) / 2) + ) - fake_delta = arg_epsilon * 
(abs(fake_highest_bound-fake_lowest_bound)) - expected_mass = abs(fake_lowest_bound - arg_val)/fake_delta + fake_delta = arg_epsilon * (abs(fake_highest_bound - fake_lowest_bound)) + expected_mass = abs(fake_lowest_bound - arg_val) / fake_delta print(fake_lowest_bound, arg_val, fake_highest_bound) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -699,24 +908,42 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('GREEN', [({'GREEN'}, expected_mass), ({'GREEN', 'RED'}, 1.0 - expected_mass)]) + assert result == ( + "GREEN", + [({"GREEN"}, expected_mass), ({"GREEN", "RED"}, 1.0 - expected_mass)], + ) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_green_zone_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_green_zone_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 3 to 100 higher than lowest bound - - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 1, - fake_highest_bound - 1, - fake_highest_bound] - - fake_delta = arg_epsilon * (abs(fake_highest_bound-fake_lowest_bound)) - - arg_val = int(fake_highest_bound - (abs(fake_highest_bound - fake_lowest_bound) / 2)) - print(fake_lowest_bound, arg_val, fake_lowest_bound + 2, fake_highest_bound -2, fake_highest_bound) + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound + + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 1, + fake_highest_bound - 1, + fake_highest_bound, + ] + + fake_delta = arg_epsilon * (abs(fake_highest_bound - fake_lowest_bound)) + + arg_val = int( + fake_highest_bound - (abs(fake_highest_bound - fake_lowest_bound) / 2) + ) + print( + fake_lowest_bound, + arg_val, + fake_lowest_bound + 2, + fake_highest_bound - 2, + fake_highest_bound, + ) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -724,24 +951,37 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('GREEN', [({'GREEN'}, 1.0)]) + assert result == ("GREEN", [({"GREEN"}, 1.0)]) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_yellow_low_zone_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_yellow_low_zone_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 1.0 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(10, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 10, 100 + ) # arbitrary, from 10 to 100 higher than lowest bound - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 2, - fake_highest_bound - 2, - fake_highest_bound] + arg_test_params = [ + 
fake_lowest_bound, + fake_lowest_bound + 2, + fake_highest_bound - 2, + fake_highest_bound, + ] - fake_delta = arg_epsilon * (abs(fake_highest_bound-fake_lowest_bound)) + fake_delta = arg_epsilon * (abs(fake_highest_bound - fake_lowest_bound)) arg_val = fake_lowest_bound + 1 - print(fake_lowest_bound, arg_val, fake_lowest_bound + 2, fake_highest_bound -2, fake_highest_bound) + print( + fake_lowest_bound, + arg_val, + fake_lowest_bound + 2, + fake_highest_bound - 2, + fake_highest_bound, + ) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -749,51 +989,78 @@ def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignment result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('YELLOW', [({'YELLOW'}, 0.5), ({'RED', 'YELLOW'}, 0.5)]) + assert result == ("YELLOW", [({"YELLOW"}, 0.5), ({"RED", "YELLOW"}, 0.5)]) -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_yellow_high_zone_when_given_test_params_length_4(mocker): + +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_in_yellow_high_zone_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 0.5 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(20, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 20, 100 + ) # arbitrary, from 20 to 100 higher than lowest bound - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 4, - fake_highest_bound - 4, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 4, + fake_highest_bound - 4, + fake_highest_bound, + ] arg_val = fake_highest_bound - 1 - print(fake_lowest_bound, fake_lowest_bound + 4, fake_highest_bound -4, arg_val, fake_highest_bound) + print( + fake_lowest_bound, + fake_lowest_bound + 4, + fake_highest_bound - 4, + arg_val, + fake_highest_bound, + ) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) # Act result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('YELLOW', [({'YELLOW'}, 0.5), ({'YELLOW', 'RED'}, 0.5)]) + assert result == ("YELLOW", [({"YELLOW"}, 0.5), ({"YELLOW", "RED"}, 0.5)]) + -def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_on_yellow_high_mark_when_given_test_params_length_4(mocker): +def test_TelemetryTestSuite_feasibility_return_expected_stat_and_mass_assignments_for_within_bound_val_on_yellow_high_mark_when_given_test_params_length_4( + mocker, +): # Arrange arg_epsilon = 0.5 - fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 - fake_highest_bound = fake_lowest_bound + pytest.gen.randint(20, 100) # arbitrary, from 10 to 100 higher than lowest bound + fake_lowest_bound = pytest.gen.randint(-100, 100) # arbitrary, from -100 to 100 + fake_highest_bound = fake_lowest_bound + pytest.gen.randint( + 20, 100 + ) # arbitrary, from 20 to 100 higher than lowest bound - arg_test_params = [fake_lowest_bound, - fake_lowest_bound + 4, - fake_highest_bound - 4, - fake_highest_bound] + arg_test_params = [ + fake_lowest_bound, + fake_lowest_bound + 4, + fake_highest_bound - 4, + fake_highest_bound, + ] arg_val = fake_highest_bound - 4 - print(fake_lowest_bound, fake_lowest_bound + 4, fake_highest_bound 
-4, arg_val, fake_highest_bound) + print( + fake_lowest_bound, + fake_lowest_bound + 4, + fake_highest_bound - 4, + arg_val, + fake_highest_bound, + ) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) # Act result = cut.feasibility(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('YELLOW', [({'YELLOW', 'GREEN'}, 1.0)]) + assert result == ("YELLOW", [({"YELLOW", "GREEN"}, 1.0)]) + # noop tests def test_TelemetryTestSuite_noop_returns_tuple_of_str_GREEN_and_list_containing_tuple_of_set_of_str_GREEN_and_1_pt_0(): @@ -808,10 +1075,13 @@ def test_TelemetryTestSuite_noop_returns_tuple_of_str_GREEN_and_list_containing_ result = cut.noop(arg_val, arg_test_params, arg_epsilon) # Assert - assert result == ('GREEN', [({'GREEN'}, 1.0)]) + assert result == ("GREEN", [({"GREEN"}, 1.0)]) + # calc_single_status tests -def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_not_str_max_or_str_distr_or_str_strict(mocker): +def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_not_str_max_or_str_distr_or_str_strict( + mocker, +): # Arrange arg_status_list = MagicMock() arg_mode = MagicMock() @@ -821,148 +1091,202 @@ def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_ cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list, arg_mode) # Assert assert telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) assert result == (fake_max_occurrence, 1.0) -def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_str_max(mocker): + +def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_str_max( + mocker, +): # Arrange arg_status_list = MagicMock() - arg_mode = 'max' + arg_mode = "max" fake_occurrences = MagicMock() fake_max_occurrence = MagicMock() cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list, arg_mode) # Assert assert telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) assert result == (fake_max_occurrence, 1.0) -def 
test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_ratio_of_max_occurrence_over_len_given_status_list_when_mode_is_str_distr(mocker): + +def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_ratio_of_max_occurrence_over_len_given_status_list_when_mode_is_str_distr( + mocker, +): # Arrange arg_status_list = [] - arg_mode = 'distr' + arg_mode = "distr" - num_fake_statuses = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) + num_fake_statuses = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) fake_max_occurrence = MagicMock() for i in range(num_fake_statuses): arg_status_list.append(MagicMock()) - fake_occurrences = telemetry_test_suite.Counter.__new__(telemetry_test_suite.Counter) + fake_occurrences = telemetry_test_suite.Counter.__new__( + telemetry_test_suite.Counter + ) - expected_float = fake_occurrences[fake_max_occurrence]/num_fake_statuses + expected_float = fake_occurrences[fake_max_occurrence] / num_fake_statuses cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list, arg_mode) # Assert assert telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) assert result == (fake_max_occurrence, expected_float) -def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_str_strict_and_no_occurrences_of_str_RED(mocker): + +def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_value_from_call_to_most_common_on_occurrences_and_1_pt_0_when_mode_is_str_strict_and_no_occurrences_of_str_RED( + mocker, +): # Arrange arg_status_list = [] - arg_mode = 'strict' + arg_mode = "strict" - num_fake_statuses = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) + num_fake_statuses = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) fake_max_occurrence = MagicMock() for i in range(num_fake_statuses): arg_status_list.append(MagicMock()) - fake_occurrences = telemetry_test_suite.Counter.__new__(telemetry_test_suite.Counter) + fake_occurrences = telemetry_test_suite.Counter.__new__( + telemetry_test_suite.Counter + ) - expected_float = fake_occurrences[fake_max_occurrence]/num_fake_statuses + expected_float = fake_occurrences[fake_max_occurrence] / num_fake_statuses cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list, arg_mode) # Assert assert 
telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) assert result == (fake_max_occurrence, 1.0) -def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_str_RED_and_1_pt_0_when_mode_is_str_strict_and_ratio_of_RED_occurrence_over_len_given_status_list_with_occurrences_of_str_RED(mocker): + +def test_TelemetryTestSuite_calc_single_status_returns_tuple_of_str_RED_and_1_pt_0_when_mode_is_str_strict_and_ratio_of_RED_occurrence_over_len_given_status_list_with_occurrences_of_str_RED( + mocker, +): # Arrange arg_status_list = [] - arg_mode = 'strict' - - num_fake_statuses = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) - num_red_statuses = pytest.gen.randint(1, num_fake_statuses) # arbitrary, from 1 to total statuses + arg_mode = "strict" + + num_fake_statuses = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) + num_red_statuses = pytest.gen.randint( + 1, num_fake_statuses + ) # arbitrary, from 1 to total statuses fake_max_occurrence = MagicMock() for i in range(num_fake_statuses): arg_status_list.append(MagicMock()) - fake_occurrences = telemetry_test_suite.Counter(['RED'] * num_red_statuses) + fake_occurrences = telemetry_test_suite.Counter(["RED"] * num_red_statuses) - expected_float = fake_occurrences['RED']/num_fake_statuses + expected_float = fake_occurrences["RED"] / num_fake_statuses cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list, arg_mode) # Assert assert telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) - assert result == ('RED', expected_float) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) + assert result == ("RED", expected_float) + def test_TelemetryTestSuite_calc_single_status_default_given_mode_is_str_strict(mocker): # Arrange arg_status_list = [] - num_fake_statuses = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) - num_red_statuses = pytest.gen.randint(1, num_fake_statuses) # arbitrary, from 1 to total statuses + num_fake_statuses = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 not allowed, div by 0 error) + num_red_statuses = pytest.gen.randint( + 1, num_fake_statuses + ) # arbitrary, from 1 to total statuses fake_max_occurrence = MagicMock() for i in range(num_fake_statuses): arg_status_list.append(MagicMock()) - fake_occurrences = telemetry_test_suite.Counter(['RED'] * num_red_statuses) + fake_occurrences = telemetry_test_suite.Counter(["RED"] * num_red_statuses) - expected_float = fake_occurrences['RED']/num_fake_statuses + expected_float = fake_occurrences["RED"] / num_fake_statuses cut = TelemetryTestSuite.__new__(TelemetryTestSuite) - mocker.patch(telemetry_test_suite.__name__ + '.Counter', return_value=fake_occurrences) - mocker.patch.object(fake_occurrences, 'most_common', return_value=[[fake_max_occurrence]]) + mocker.patch( + 
telemetry_test_suite.__name__ + ".Counter", return_value=fake_occurrences + ) + mocker.patch.object( + fake_occurrences, "most_common", return_value=[[fake_max_occurrence]] + ) # Act result = cut.calc_single_status(arg_status_list) # Assert assert telemetry_test_suite.Counter.call_count == 1 - assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list, ) - assert result == ('RED', expected_float) + assert telemetry_test_suite.Counter.call_args_list[0].args == (arg_status_list,) + assert result == ("RED", expected_float) + # get_suite_status def test_TelemetryTestSuite_get_suite_status_raises_TypeError_when_latest_results_is_None(): @@ -977,26 +1301,34 @@ def test_TelemetryTestSuite_get_suite_status_raises_TypeError_when_latest_result # Assert assert e_info.match("'NoneType' object is not iterable") -def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_single_status_when_it_is_given_empty_list_because_latest_results_are_empty(mocker): + +def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_single_status_when_it_is_given_empty_list_because_latest_results_are_empty( + mocker, +): # Arrange expected_result = MagicMock() cut = TelemetryTestSuite.__new__(TelemetryTestSuite) cut.latest_results = [] - mocker.patch.object(cut, 'calc_single_status', return_value=expected_result) + mocker.patch.object(cut, "calc_single_status", return_value=expected_result) # Act result = cut.get_suite_status() # Assert assert cut.calc_single_status.call_count == 1 - assert cut.calc_single_status.call_args_list[0].args == ([], ) + assert cut.calc_single_status.call_args_list[0].args == ([],) assert result == expected_result -def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_single_status_when_it_is_given_list_of_all_statuses_in_latest_results(mocker): + +def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_single_status_when_it_is_given_list_of_all_statuses_in_latest_results( + mocker, +): # Arrange - num_fake_results = pytest.gen.randint(1, 10) # arbitrary, from 1 to 10 (0 has its own test) + num_fake_results = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 has its own test) fake_latest_results = [] fake_statuses = [] @@ -1004,7 +1336,7 @@ def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_sin fake_res = MagicMock() fake_status = MagicMock() - mocker.patch.object(fake_res, 'get_status', return_value=fake_status) + mocker.patch.object(fake_res, "get_status", return_value=fake_status) fake_latest_results.append(fake_res) fake_statuses.append(fake_status) @@ -1014,20 +1346,23 @@ def test_TelemetryTestSuite_get_suite_status_returns_value_from_call_to_calc_sin cut = TelemetryTestSuite.__new__(TelemetryTestSuite) cut.latest_results = fake_latest_results - mocker.patch.object(cut, 'calc_single_status', return_value=expected_result) + mocker.patch.object(cut, "calc_single_status", return_value=expected_result) # Act result = cut.get_suite_status() # Assert assert cut.calc_single_status.call_count == 1 - assert cut.calc_single_status.call_args_list[0].args == (fake_statuses, ) + assert cut.calc_single_status.call_args_list[0].args == (fake_statuses,) assert result == expected_result + # get_status_specific_mnemonics # test_get_status_specific_mnemonics_raises_TypeError_when_latest_results_is_None was written because None is the init value for latest_results -def test_TelemetryTestSuite_get_status_specific_mnemonics_raises_TypeError_when_latest_results_is_None(mocker): 
- # Arrange +def test_TelemetryTestSuite_get_status_specific_mnemonics_raises_TypeError_when_latest_results_is_None( + mocker, +): + # Arrange arg_status = MagicMock() cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -1040,7 +1375,10 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_raises_TypeError_when_ # Assert assert e_info.match("'NoneType' object is not iterable") -def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_when_latest_results_is_empty(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_when_latest_results_is_empty( + mocker, +): # Arrange arg_status = MagicMock() @@ -1053,7 +1391,10 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_whe # Assert assert result == [] -def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_the_only_name_in_latest_results_because_its_status_eq_given_status(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_the_only_name_in_latest_results_because_its_status_eq_given_status( + mocker, +): # Arrange arg_status = MagicMock() @@ -1061,8 +1402,8 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_the_only_name_ expected_name = str(MagicMock()) - mocker.patch.object(fake_res, 'get_status', return_value=arg_status) - mocker.patch.object(fake_res, 'get_name', return_value=expected_name) + mocker.patch.object(fake_res, "get_status", return_value=arg_status) + mocker.patch.object(fake_res, "get_name", return_value=expected_name) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) cut.latest_results = [fake_res] @@ -1073,13 +1414,16 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_the_only_name_ # Assert assert result == [expected_name] -def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_latest_results_because_its_status_not_eq_given_status(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_latest_results_because_its_status_not_eq_given_status( + mocker, +): # Arrange arg_status = MagicMock() fake_res = MagicMock() - mocker.patch.object(fake_res, 'get_status', return_value=MagicMock()) + mocker.patch.object(fake_res, "get_status", return_value=MagicMock()) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) cut.latest_results = [fake_res] @@ -1090,28 +1434,37 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_empty_list_lat # Assert assert result == [] -def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_only_names_in_latest_results_where_status_matches_given_status(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_only_names_in_latest_results_where_status_matches_given_status( + mocker, +): # Arrange arg_status = MagicMock() - num_fake_results = pytest.gen.randint(2, 10) # arbitrary, from 2 to 10 (0 and 1 both have own test) - num_fake_status_matches = pytest.gen.randint(1, num_fake_results - 1) # at least 1 match up to 1 less than all + num_fake_results = pytest.gen.randint( + 2, 10 + ) # arbitrary, from 2 to 10 (0 and 1 both have own test) + num_fake_status_matches = pytest.gen.randint( + 1, num_fake_results - 1 + ) # at least 1 match up to 1 less than all fake_latest_results = [False] * num_fake_results expected_names = [] - for i in pytest.gen.sample(range(len(fake_latest_results)), num_fake_status_matches): + for i in pytest.gen.sample( + range(len(fake_latest_results)), num_fake_status_matches + ): fake_latest_results[i] = True for i in 
range(len(fake_latest_results)): fake_res = MagicMock() if fake_latest_results[i] == True: fake_name = str(MagicMock()) - mocker.patch.object(fake_res, 'get_status', return_value=arg_status) - mocker.patch.object(fake_res, 'get_name', return_value=fake_name) + mocker.patch.object(fake_res, "get_status", return_value=arg_status) + mocker.patch.object(fake_res, "get_name", return_value=fake_name) expected_names.append(fake_name) else: - mocker.patch.object(fake_res, 'get_status', return_value=MagicMock()) + mocker.patch.object(fake_res, "get_status", return_value=MagicMock()) fake_latest_results[i] = fake_res cut = TelemetryTestSuite.__new__(TelemetryTestSuite) @@ -1124,11 +1477,16 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_only_names_in_ assert result == expected_names assert len(result) != len(fake_latest_results) -def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_all_names_in_latest_results_when_all_statuses_matches_given_status(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_all_names_in_latest_results_when_all_statuses_matches_given_status( + mocker, +): # Arrange arg_status = MagicMock() - num_fake_results = pytest.gen.randint(1, 10) # arbitrary, from 2 to 10 (0 and 1 both have own test) + num_fake_results = pytest.gen.randint( + 1, 10 + ) # arbitrary, from 1 to 10 (0 has its own test) fake_latest_results = [] expected_names = [] @@ -1136,8 +1494,8 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_all_names_in_l for i in range(num_fake_results): fake_res = MagicMock() fake_name = str(MagicMock()) - mocker.patch.object(fake_res, 'get_status', return_value=arg_status) - mocker.patch.object(fake_res, 'get_name', return_value=fake_name) + mocker.patch.object(fake_res, "get_status", return_value=arg_status) + mocker.patch.object(fake_res, "get_name", return_value=fake_name) fake_latest_results.append(fake_res) expected_names.append(fake_name) @@ -1151,14 +1509,17 @@ def test_TelemetryTestSuite_get_status_specific_mnemonics_returns_all_names_in_l assert result == expected_names assert len(result) == len(fake_latest_results) -def test_TelemetryTestSuite_get_status_specific_mnemonics_default_given_status_is_str_RED(mocker): + +def test_TelemetryTestSuite_get_status_specific_mnemonics_default_given_status_is_str_RED( + mocker, +): # Arrange fake_res = MagicMock() expected_name = str(MagicMock()) - mocker.patch.object(fake_res, 'get_status', return_value='RED') - mocker.patch.object(fake_res, 'get_name', return_value=expected_name) + mocker.patch.object(fake_res, "get_status", return_value="RED") + mocker.patch.object(fake_res, "get_name", return_value=expected_name) cut = TelemetryTestSuite.__new__(TelemetryTestSuite) cut.latest_results = [fake_res] diff --git a/test/onair/src/systems/test_vehicle_rep.py b/test/onair/src/systems/test_vehicle_rep.py index 1a77214a..787fe386 100644 --- a/test/onair/src/systems/test_vehicle_rep.py +++ b/test/onair/src/systems/test_vehicle_rep.py @@ -14,21 +14,24 @@ import onair.src.systems.vehicle_rep as vehicle_rep from onair.src.systems.vehicle_rep import VehicleRepresentation + # __init__ tests -def test_VehicleRepresentation__init__asserts_when_len_given_headers_is_not_eq_to_len_given_tests(mocker): +def test_VehicleRepresentation__init__asserts_when_len_given_headers_is_not_eq_to_len_given_tests( + mocker, +): # Arrange arg_headers = MagicMock() arg_tests = MagicMock() fake_len = [] - fake_len.append(pytest.gen.randint(0, 100)) # arbitrary, from 0 to 100 size + 
fake_len.append(pytest.gen.randint(0, 100)) # arbitrary, from 0 to 100 size fake_len.append(fake_len[0]) - while fake_len[1] == fake_len[0]: # need a value not equal for test to pass - fake_len[1] = pytest.gen.randint(0, 100) # arbitrary, same as fake_len_headers + while fake_len[1] == fake_len[0]: # need a value not equal for test to pass + fake_len[1] = pytest.gen.randint(0, 100) # arbitrary, same range as fake_len[0] cut = VehicleRepresentation.__new__(VehicleRepresentation) - mocker.patch(vehicle_rep.__name__ + '.len', side_effect=fake_len) + mocker.patch(vehicle_rep.__name__ + ".len", side_effect=fake_len) # Act with pytest.raises(AssertionError) as e_info: cut.__init__(arg_headers, arg_tests) @@ -36,46 +39,60 @@ def test_VehicleRepresentation__init__asserts_when_len_given_headers_is_not_eq_t # Assert assert vehicle_rep.len.call_count == 2 call_list = set({}) - [call_list.add(vehicle_rep.len.call_args_list[i].args) for i in range(len(vehicle_rep.len.call_args_list))] - assert call_list == {(arg_headers, ), (arg_tests, )} - assert e_info.match('') + [ + call_list.add(vehicle_rep.len.call_args_list[i].args) + for i in range(len(vehicle_rep.len.call_args_list)) + ] + assert call_list == {(arg_headers,), (arg_tests,)} + assert e_info.match("") + -def test_VehicleRepresentation__init__sets_status_to_Status_with_str_MISSION_and_headers_to_given_headers_and_test_suite_to_TelemetryTestSuite_with_given_headers_and_tests_and_curr_data_to_all_empty_step_len_of_headers(mocker): +def test_VehicleRepresentation__init__sets_status_to_Status_with_str_MISSION_and_headers_to_given_headers_and_test_suite_to_TelemetryTestSuite_with_given_headers_and_tests_and_curr_data_to_all_empty_step_len_of_headers( + mocker, +): # Arrange arg_headers = MagicMock() arg_tests = MagicMock() - fake_len = pytest.gen.randint(0, 100) # arbitrary, 0 to 100 items + fake_len = pytest.gen.randint(0, 100) # arbitrary, 0 to 100 items fake_status = MagicMock() fake_test_suite = MagicMock() cut = VehicleRepresentation.__new__(VehicleRepresentation) - mocker.patch(vehicle_rep.__name__ + '.len', return_value=fake_len) - mocker.patch(vehicle_rep.__name__ + '.Status', return_value=fake_status) - mocker.patch(vehicle_rep.__name__ + '.TelemetryTestSuite', return_value=fake_test_suite) + mocker.patch(vehicle_rep.__name__ + ".len", return_value=fake_len) + mocker.patch(vehicle_rep.__name__ + ".Status", return_value=fake_status) + mocker.patch( + vehicle_rep.__name__ + ".TelemetryTestSuite", return_value=fake_test_suite + ) # Act cut.__init__(arg_headers, arg_tests) # Assert assert vehicle_rep.Status.call_count == 1 - assert vehicle_rep.Status.call_args_list[0].args == ('MISSION', ) + assert vehicle_rep.Status.call_args_list[0].args == ("MISSION",) assert cut.status == fake_status assert cut.headers == arg_headers assert vehicle_rep.TelemetryTestSuite.call_count == 1 - assert vehicle_rep.TelemetryTestSuite.call_args_list[0].args == (arg_headers, arg_tests) + assert vehicle_rep.TelemetryTestSuite.call_args_list[0].args == ( + arg_headers, + arg_tests, + ) assert cut.test_suite == fake_test_suite - assert cut.curr_data == ['-'] * fake_len + assert cut.curr_data == ["-"] * fake_len + # update tests -def test_VehicleRepresentation_update_calls_update_constructs_then_update_curr_data_then_executes_test_suite_and_finally_sets_status(mocker): +def test_VehicleRepresentation_update_calls_update_constructs_then_update_curr_data_then_executes_test_suite_and_finally_sets_status( + mocker, +): # Arrange mock_manager = mocker.MagicMock() arg_frame = 
MagicMock() fake_suite_status = [] - num_fake_status = pytest.gen.randint(0, 10) # from 0 to 10 arbitrary + num_fake_status = pytest.gen.randint(0, 10) # from 0 to 10 arbitrary for i in range(num_fake_status): fake_suite_status.append(MagicMock()) @@ -84,26 +101,45 @@ def test_VehicleRepresentation_update_calls_update_constructs_then_update_curr_d cut.curr_data = MagicMock() cut.status = MagicMock() - mock_manager.attach_mock(mocker.patch.object(cut, 'update_constructs'), 'update_constructs') - mock_manager.attach_mock(mocker.patch.object(cut, 'update_curr_data'), 'update_curr_data') - mock_manager.attach_mock(mocker.patch.object(cut.test_suite, 'execute_suite'), 'test_suite.execute_suite') - mock_manager.attach_mock(mocker.patch.object(cut.test_suite, 'get_suite_status', return_value=fake_suite_status), 'test_suite.get_suite_status') - mock_manager.attach_mock(mocker.patch.object(cut.status, 'set_status'), 'status.set_status') + mock_manager.attach_mock( + mocker.patch.object(cut, "update_constructs"), "update_constructs" + ) + mock_manager.attach_mock( + mocker.patch.object(cut, "update_curr_data"), "update_curr_data" + ) + mock_manager.attach_mock( + mocker.patch.object(cut.test_suite, "execute_suite"), "test_suite.execute_suite" + ) + mock_manager.attach_mock( + mocker.patch.object( + cut.test_suite, "get_suite_status", return_value=fake_suite_status + ), + "test_suite.get_suite_status", + ) + mock_manager.attach_mock( + mocker.patch.object(cut.status, "set_status"), "status.set_status" + ) # Act cut.update(arg_frame) # Assert - mock_manager.assert_has_calls([ - mocker.call.update_curr_data(arg_frame), - mocker.call.test_suite.execute_suite(cut.curr_data), - mocker.call.test_suite.get_suite_status(), - mocker.call.status.set_status(*fake_suite_status), - mocker.call.update_constructs(cut.curr_data), - ], any_order=False) + mock_manager.assert_has_calls( + [ + mocker.call.update_curr_data(arg_frame), + mocker.call.test_suite.execute_suite(cut.curr_data), + mocker.call.test_suite.get_suite_status(), + mocker.call.status.set_status(*fake_suite_status), + mocker.call.update_constructs(cut.curr_data), + ], + any_order=False, + ) + # update_constructs tests -def test_VehicleRepresentation_update_constructs_does_nothing_when_knowledge_synthesis_constructs_are_empty(mocker): +def test_VehicleRepresentation_update_constructs_does_nothing_when_knowledge_synthesis_constructs_are_empty( + mocker, +): # Arrange arg_frame = MagicMock() @@ -118,17 +154,22 @@ def test_VehicleRepresentation_update_constructs_does_nothing_when_knowledge_syn # Assert assert result == None -def test_VehicleRepresentation_update_constructs_calls_update_on_each_knowledge_synthesis_construct(mocker): + +def test_VehicleRepresentation_update_constructs_calls_update_on_each_knowledge_synthesis_construct( + mocker, +): # Arrange arg_frame = MagicMock() - num_fake_constructs = pytest.gen.randint(1, 10) # from 1 to 10 arbitrary, 0 has own test + num_fake_constructs = pytest.gen.randint( + 1, 10 + ) # from 1 to 10 arbitrary, 0 has own test fake_constructs = [] for i in range(num_fake_constructs): fake_construct = MagicMock() fake_constructs.append(fake_construct) - mocker.patch.object(fake_construct, 'update') + mocker.patch.object(fake_construct, "update") cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.knowledge_synthesis_constructs = fake_constructs @@ -139,7 +180,8 @@ def test_VehicleRepresentation_update_constructs_calls_update_on_each_knowledge_ # Assert for i in range(num_fake_constructs): 
fake_constructs[i].update.call_count == 1 - fake_constructs[i].update.call_args_list[0].args == (arg_frame, ) + fake_constructs[i].update.call_args_list[0].args == (arg_frame,) + # update_curr_data tests def test_VehicleRepresentation_update_does_nothing_when_given_frame_is_empty(mocker): @@ -155,17 +197,22 @@ def test_VehicleRepresentation_update_does_nothing_when_given_frame_is_empty(moc # Assert assert result == None -def test_VehicleRepresentation_update_copies_all_frame_data_into_curr_data_when_all_frame_data_occupied(mocker): + +def test_VehicleRepresentation_update_copies_all_frame_data_into_curr_data_when_all_frame_data_occupied( + mocker, +): # Arrange arg_frame = [] - num_items_in_arg_frame = pytest.gen.randint(1, 10) # from 1 to 10 arbitrary, 0 has own test + num_items_in_arg_frame = pytest.gen.randint( + 1, 10 + ) # from 1 to 10 arbitrary, 0 has own test fake_curr_data = [] for i in range(num_items_in_arg_frame): arg_frame.append(MagicMock()) fake_curr_data.append(MagicMock()) - assert fake_curr_data != arg_frame # sanity check + assert fake_curr_data != arg_frame # sanity check cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.curr_data = fake_curr_data @@ -176,24 +223,31 @@ def test_VehicleRepresentation_update_copies_all_frame_data_into_curr_data_when_ # Assert assert cut.curr_data == arg_frame -def test_VehicleRepresentation_update_copies_only_occupied_frame_data_into_curr_data_when_some_frame_data_vacant(mocker): + +def test_VehicleRepresentation_update_copies_only_occupied_frame_data_into_curr_data_when_some_frame_data_vacant( + mocker, +): # Arrange arg_frame = [] - num_items_in_arg_frame = pytest.gen.randint(1, 10) # from 1 to 10 arbitrary, 0 has own test + num_items_in_arg_frame = pytest.gen.randint( + 1, 10 + ) # from 1 to 10 arbitrary, 0 has own test fake_curr_data = [] for i in range(num_items_in_arg_frame): arg_frame.append(MagicMock()) fake_curr_data.append(MagicMock()) - assert fake_curr_data != arg_frame # sanity check + assert fake_curr_data != arg_frame # sanity check expected_curr_data = arg_frame.copy() - num_vacant_frame_data = pytest.gen.randint(1, num_items_in_arg_frame) # from 1 to frame size + num_vacant_frame_data = pytest.gen.randint( + 1, num_items_in_arg_frame + ) # from 1 to frame size vacant_data_points = list(range(num_vacant_frame_data)) for i in vacant_data_points: - arg_frame[i] = '-' + arg_frame[i] = "-" expected_curr_data[i] = fake_curr_data[i] cut = VehicleRepresentation.__new__(VehicleRepresentation) @@ -205,6 +259,7 @@ def test_VehicleRepresentation_update_copies_only_occupied_frame_data_into_curr_ # Assert assert cut.curr_data == expected_curr_data + # get_headers tests def test_VehicleRepresentation_get_headers_returns_headers(): # Arrange @@ -219,15 +274,20 @@ def test_VehicleRepresentation_get_headers_returns_headers(): # Assert assert result == expected_result + # get_current_faulting_mnemonics tests -def test_VehicleRepresentation_get_current_faulting_mnemonics_returns_test_suite_call_get_status_specific_mnemonics(mocker): +def test_VehicleRepresentation_get_current_faulting_mnemonics_returns_test_suite_call_get_status_specific_mnemonics( + mocker, +): # Arrange expected_result = MagicMock() cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.test_suite = MagicMock() - mocker.patch.object(cut.test_suite, 'get_status_specific_mnemonics', return_value=expected_result) + mocker.patch.object( + cut.test_suite, "get_status_specific_mnemonics", return_value=expected_result + ) # Act
result = cut.get_current_faulting_mnemonics() @@ -237,6 +297,7 @@ def test_VehicleRepresentation_get_current_faulting_mnemonics_returns_test_suite assert cut.test_suite.get_status_specific_mnemonics.call_args_list[0].args == () assert result == expected_result + # get_current_data tests def test_VehicleRepresentation_get_current_data_returns_curr_data(): # Arrange @@ -251,6 +312,7 @@ def test_VehicleRepresentation_get_current_data_returns_curr_data(): # Assert assert result == expected_result + # get_current_time tests def test_VehicleRepresentation_get_current_time_returns_curr_data_item_0(): # Arrange @@ -266,6 +328,7 @@ def test_VehicleRepresentation_get_current_time_returns_curr_data_item_0(): # Assert assert result == expected_result + # get_status tests def test_VehicleRepresentation_get_status_returns_status_call_to_get_status(mocker): # Arrange @@ -274,7 +337,7 @@ def test_VehicleRepresentation_get_status_returns_status_call_to_get_status(mock cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.status = MagicMock() - mocker.patch.object(cut.status, 'get_status', return_value=expected_result) + mocker.patch.object(cut.status, "get_status", return_value=expected_result) # Act result = cut.get_status() @@ -284,15 +347,18 @@ def test_VehicleRepresentation_get_status_returns_status_call_to_get_status(mock assert cut.status.get_status.call_args_list[0].args == () assert result == expected_result + # get_bayesian_status tests -def test_VehicleRepresentation_get_bayesian_status_returns_status_call_to_get_bayesian_status(mocker): +def test_VehicleRepresentation_get_bayesian_status_returns_status_call_to_get_bayesian_status( + mocker, +): # Arrange expected_result = MagicMock() cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.status = MagicMock() - mocker.patch.object(cut.status, 'get_bayesian_status', return_value=expected_result) + mocker.patch.object(cut.status, "get_bayesian_status", return_value=expected_result) # Act result = cut.get_bayesian_status() @@ -302,6 +368,7 @@ def test_VehicleRepresentation_get_bayesian_status_returns_status_call_to_get_ba assert cut.status.get_bayesian_status.call_args_list[0].args == () assert result == expected_result + # get_batch_status_reports tests def test_VehicleRepresentation_get_batch_status_reports_returngets_None(): # Arrange @@ -317,8 +384,11 @@ def test_VehicleRepresentation_get_batch_status_reports_returngets_None(): # Assert assert result == expected_result + # get_state_information tests -def test_VehicleRepresentation_get_state_information_calls_render_reasoning_on_knowledge_synthesis_constructs(mocker): +def test_VehicleRepresentation_get_state_information_calls_render_reasoning_on_knowledge_synthesis_constructs( + mocker, +): # Arrange arg_frame = MagicMock() arg_headers = MagicMock() @@ -326,15 +396,19 @@ def test_VehicleRepresentation_get_state_information_calls_render_reasoning_on_k fake_render_reasoning_result = MagicMock() fake_knowledge_synthesis_construct = MagicMock() - fake_knowledge_synthesis_construct.component_name = 'foo' + fake_knowledge_synthesis_construct.component_name = "foo" cut = VehicleRepresentation.__new__(VehicleRepresentation) cut.knowledge_synthesis_constructs = [fake_knowledge_synthesis_construct] - mocker.patch.object(fake_knowledge_synthesis_construct, 'render_reasoning', return_value=fake_render_reasoning_result) + mocker.patch.object( + fake_knowledge_synthesis_construct, + "render_reasoning", + return_value=fake_render_reasoning_result, + ) # Act result = 
cut.get_state_information() # Assert - assert list(result.keys())[0] == 'foo' + assert list(result.keys())[0] == "foo" assert list(result.values())[0] == fake_render_reasoning_result diff --git a/test/onair/src/util/test_cleanup.py b/test/onair/src/util/test_cleanup.py index ce5d10fe..348fb354 100644 --- a/test/onair/src/util/test_cleanup.py +++ b/test/onair/src/util/test_cleanup.py @@ -11,32 +11,36 @@ import onair.src.util.cleanup as cleanup + # test_setup_folders -def test_cleanup_setup_folders_creates_dir_when_given_results_path_does_not_exist(mocker): - # Arrange - arg_results_path = str(MagicMock()) +def test_cleanup_setup_folders_creates_dir_when_given_results_path_does_not_exist( + mocker, +): + # Arrange + arg_results_path = str(MagicMock()) + + mocker.patch(cleanup.__name__ + ".os.path.isdir", return_value=False) + mocker.patch(cleanup.__name__ + ".os.mkdir") - mocker.patch(cleanup.__name__ + '.os.path.isdir', return_value=False) - mocker.patch(cleanup.__name__ + '.os.mkdir') + # Act + cleanup.setup_folders(arg_results_path) - # Act - cleanup.setup_folders(arg_results_path) + # Assert + assert cleanup.os.path.isdir.call_count == 1 + assert cleanup.os.mkdir.call_count == 1 + assert cleanup.os.mkdir.call_args_list[0].args == (arg_results_path,) - # Assert - assert cleanup.os.path.isdir.call_count == 1 - assert cleanup.os.mkdir.call_count == 1 - assert cleanup.os.mkdir.call_args_list[0].args == (arg_results_path, ) def test_cleanup_setup_folders_does_not_create_dir_when_it_already_exists(mocker): - # Arrange - arg_results_path = str(MagicMock()) + # Arrange + arg_results_path = str(MagicMock()) - mocker.patch(cleanup.__name__ + '.os.path.isdir', return_value=True) - mocker.patch(cleanup.__name__ + '.os.mkdir') + mocker.patch(cleanup.__name__ + ".os.path.isdir", return_value=True) + mocker.patch(cleanup.__name__ + ".os.mkdir") - # Act - cleanup.setup_folders(arg_results_path) + # Act + cleanup.setup_folders(arg_results_path) - # Assert - assert cleanup.os.path.isdir.call_count == 1 - assert cleanup.os.mkdir.call_count == 0 + # Assert + assert cleanup.os.path.isdir.call_count == 1 + assert cleanup.os.mkdir.call_count == 0 diff --git a/test/onair/src/util/test_data_conversion.py b/test/onair/src/util/test_data_conversion.py index 4279c5d2..a930b3ba 100644 --- a/test/onair/src/util/test_data_conversion.py +++ b/test/onair/src/util/test_data_conversion.py @@ -15,12 +15,15 @@ from numpy import ndarray + # status_to_oneHot tests -def test_data_conversion_status_to_oneHot_returns_given_status_when_status_isinstance_of_np_ndarray(mocker): +def test_data_conversion_status_to_oneHot_returns_given_status_when_status_isinstance_of_np_ndarray( + mocker, +): # Arrange arg_status = MagicMock() - mocker.patch(data_conversion.__name__ + '.isinstance', return_value=True) + mocker.patch(data_conversion.__name__ + ".isinstance", return_value=True) # Act result = data_conversion.status_to_oneHot(arg_status) @@ -30,22 +33,24 @@ def test_data_conversion_status_to_oneHot_returns_given_status_when_status_isins assert data_conversion.isinstance.call_args_list[0].args == (arg_status, ndarray) assert result == arg_status -def test_data_conversion_status_to_oneHot_returns_one_hot_set_to_list_of_four_zeros_and_the_value_of_the_classes_status_to_1_point_0(mocker): + +def test_data_conversion_status_to_oneHot_returns_one_hot_set_to_list_of_four_zeros_and_the_value_of_the_classes_status_to_1_point_0( + mocker, +): # Arrange arg_status = MagicMock() - fake_status = pytest.gen.randint(0,3) # size of array choice, from 
0 to 3 + fake_status = pytest.gen.randint(0, 3) # size of array choice, from 0 to 3 expected_result = [0.0, 0.0, 0.0, 0.0] expected_result[fake_status] = 1.0 data_conversion.classes = {arg_status: fake_status} - mocker.patch(data_conversion.__name__ + '.isinstance', return_value=False) + mocker.patch(data_conversion.__name__ + ".isinstance", return_value=False) # Act result = data_conversion.status_to_oneHot(arg_status) # Assert assert result == expected_result - diff --git a/test/onair/src/util/test_file_io.py b/test/onair/src/util/test_file_io.py index 3196d8ac..49573da8 100644 --- a/test/onair/src/util/test_file_io.py +++ b/test/onair/src/util/test_file_io.py @@ -14,230 +14,264 @@ # parse_associations_from_json tests -def test_file_io_parse_associations_raises_KeyError_when_loaded_data_does_not_have_keyword_children(mocker): - # Arrange - arg_filepath = MagicMock() - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) +def test_file_io_parse_associations_raises_KeyError_when_loaded_data_does_not_have_keyword_children( + mocker, +): + # Arrange + arg_filepath = MagicMock() - fake_data = {} + fake_file_iterator = MagicMock() + fake_f = MagicMock() + fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator}) - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') + fake_data = {} - # Act - with pytest.raises(KeyError) as e_info: - file_io.parse_associations_from_json(arg_filepath) + mocker.patch(file_io.__name__ + ".open", return_value=fake_f) + mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data) + mocker.patch(file_io.__name__ + ".print") - # Assert - assert str(e_info.value) == "'children'" - assert file_io.open.call_count == 1 - assert file_io.json.load.call_count == 1 - assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) - assert file_io.print.call_count == 0 - -def test_file_io_parse_associations_does_not_print_when_loaded_data_children_is_empty(mocker): - # Arrange - arg_filepath = MagicMock() - - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_data = {} - fake_data['children'] = [] - - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') - - # Act - file_io.parse_associations_from_json(arg_filepath) - - # Assert - assert file_io.open.call_count == 1 - assert file_io.json.load.call_count == 1 - assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) - assert file_io.print.call_count == 0 - -def test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_missing_name(mocker): - # Arrange - arg_filepath = MagicMock() - - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_data = {} - fake_data['children'] = [{}] - - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') - - # Act - with pytest.raises(KeyError) as e_info: - file_io.parse_associations_from_json(arg_filepath) + # Act + with pytest.raises(KeyError) as e_info: + file_io.parse_associations_from_json(arg_filepath) + + # Assert + assert 
str(e_info.value) == "'children'" + assert file_io.open.call_count == 1 + assert file_io.json.load.call_count == 1 + assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) + assert file_io.print.call_count == 0 + + +def test_file_io_parse_associations_does_not_print_when_loaded_data_children_is_empty( + mocker, +): + # Arrange + arg_filepath = MagicMock() - # Assert - assert str(e_info.value) == "'name'" - assert file_io.open.call_count == 1 - assert file_io.json.load.call_count == 1 - assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) - assert file_io.print.call_count == 0 - -def test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_missing_connections(mocker): - # Arrange - arg_filepath = MagicMock() - - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_data = {} - fake_data['children'] = [{'name':'I have a name!'}] - - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') - - # Act - with pytest.raises(KeyError) as e_info: + fake_file_iterator = MagicMock() + fake_f = MagicMock() + fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator}) + fake_data = {} + fake_data["children"] = [] + + mocker.patch(file_io.__name__ + ".open", return_value=fake_f) + mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data) + mocker.patch(file_io.__name__ + ".print") + + # Act file_io.parse_associations_from_json(arg_filepath) - # Assert - assert str(e_info.value) == "'connections'" - assert file_io.open.call_count == 1 - assert file_io.json.load.call_count == 1 - assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) - assert file_io.print.call_count == 0 - -def test_file_io_parse_associations_does_not_print_when_loaded_data_child_conections_are_empty(mocker): - # Arrange - arg_filepath = MagicMock() - - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_data = {} - fake_data['children'] = [{'name':'I have a name!', 'connections':[]}] - - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') - - # Act - file_io.parse_associations_from_json(arg_filepath) - - # Assert - assert file_io.open.call_count == 1 - assert file_io.json.load.call_count == 1 - assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) - assert file_io.print.call_count == 0 - -def test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_connections_missing_target(mocker): - # Arrange - arg_filepath = MagicMock() - - fake_file_iterator = MagicMock() - fake_f = MagicMock() - fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_data = {} - fake_data['children'] = [{'name':'I have a name!', 'connections':[{}]}] - - mocker.patch(file_io.__name__ + '.open', return_value=fake_f) - mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data) - mocker.patch(file_io.__name__ + '.print') - - # Act - with pytest.raises(KeyError) as e_info: + # Assert + assert file_io.open.call_count == 1 + assert file_io.json.load.call_count == 1 + assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,) + assert file_io.print.call_count == 0 + + +def 
test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_missing_name(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = [{}]
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
+    with pytest.raises(KeyError) as e_info:
+        file_io.parse_associations_from_json(arg_filepath)
+
+    # Assert
+    assert str(e_info.value) == "'name'"
+    assert file_io.open.call_count == 1
+    assert file_io.json.load.call_count == 1
+    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
+    assert file_io.print.call_count == 0
+
+
+def test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_missing_connections(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = [{"name": "I have a name!"}]
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
+    with pytest.raises(KeyError) as e_info:
+        file_io.parse_associations_from_json(arg_filepath)
+
+    # Assert
+    assert str(e_info.value) == "'connections'"
+    assert file_io.open.call_count == 1
+    assert file_io.json.load.call_count == 1
+    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
+    assert file_io.print.call_count == 0
+
+
+def test_file_io_parse_associations_does_not_print_when_loaded_data_child_connections_are_empty(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = [{"name": "I have a name!", "connections": []}]
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
     file_io.parse_associations_from_json(arg_filepath)
 
-    # Assert
-    assert file_io.open.call_count == 1
-    assert file_io.json.load.call_count == 1
-    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
-    assert file_io.print.call_count == 0
-
-def 
test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_connections_missing_target(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = [{"name": "I have a name!", "connections": [{}]}]
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
+    with pytest.raises(KeyError) as e_info:
+        file_io.parse_associations_from_json(arg_filepath)
+
+    # Assert
+    assert str(e_info.value) == "'target'"
+    assert file_io.open.call_count == 1
+    assert file_io.json.load.call_count == 1
+    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
+    assert file_io.print.call_count == 0
+
+
+def test_file_io_parse_associations_raises_KeyError_when_loaded_data_child_connections_missing_weight(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = [
+        {"name": "I have a name!", "connections": [{"target": "I have a target!"}]}
+    ]
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
+    with pytest.raises(KeyError) as e_info:
+        file_io.parse_associations_from_json(arg_filepath)
+
+    # Assert
+    assert str(e_info.value) == "'weight'"
+    assert file_io.open.call_count == 1
+    assert file_io.json.load.call_count == 1
+    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
+    assert file_io.print.call_count == 0
+
+
+def test_file_io_parse_associations_prints_associations_in_reverse_sort_by_weight_when_data_is_properly_formed(
+    mocker,
+):
+    # Arrange
+    arg_filepath = MagicMock()
+
+    fake_file_iterator = MagicMock()
+    fake_f = MagicMock()
+    fake_f.configure_mock(**{"__enter__.return_value": fake_file_iterator})
+    fake_data = {}
+    fake_data["children"] = []
+    example_connection = {"target": "", "weight": 0}
+    total_num_children = pytest.gen.randint(1, 5)  # from 1 to 5
+    total_num_connections = pytest.gen.randint(1, 20)  # from 1 to 20
+    expected_prints = []
+
+    # fake children
+    for i in range(total_num_children):
+        fake_data["children"].append({"name": f"name{i}", "connections": []})
+
+    # fake connections
+    for i in range(total_num_connections):
+        fake_target = f"target{i}"
+        fake_weight = 20 - i  # highest weights first
+        # add to random child
+        child_index = pytest.gen.randrange(
+            0, total_num_children
+        )  # from 0 to total_num_children - 1
+        fake_child = fake_data["children"][child_index]
+        fake_connections = fake_child["connections"]
+        fake_connection = {"target": fake_target, "weight": fake_weight}
+        fake_connections.insert(
+            pytest.gen.randint(0, len(fake_connections)), fake_connection
+        )
+        expected_prints.append(
+            f"{fake_child['name']} --> {fake_target}, {str(fake_weight)}"
+        )
+
+    mocker.patch(file_io.__name__ + ".open", return_value=fake_f)
+    mocker.patch(file_io.__name__ + ".json.load", return_value=fake_data)
+    mocker.patch(file_io.__name__ + ".print")
+
+    # Act
     file_io.parse_associations_from_json(arg_filepath)
 
-    # Assert
-    assert str(e_info.value) == "'weight'"
-    assert file_io.open.call_count == 1
-    assert file_io.json.load.call_count == 1
-    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
-    assert file_io.print.call_count == 0
-
-def test_file_io_parse_associations_prints_associations_in_reverse_sort_by_weight_when_data_is_properly_formed(mocker):
-    # Arrange
-    arg_filepath = MagicMock()
-
-    fake_file_iterator = MagicMock()
-    fake_f = MagicMock()
-    fake_f.configure_mock(**{'__enter__.return_value': fake_file_iterator})
-    fake_data = {}
-    fake_data['children'] = []
-    example_connection = {'target':'', 'weight':0}
-    total_num_children = pytest.gen.randint(1, 5) # from 1 to 5
-    total_num_connections = pytest.gen.randint(1, 20) # from 1 to 20
-    expected_prints = []
-
-    # fake children
-    for i in range(total_num_children):
-        fake_data['children'].append({'name':f"name{i}", 'connections':[]})
-
-    # fake connections
-    for i in range(total_num_connections):
-        fake_target = f"target{i}"
-        fake_weight = 20 - i # highest weights first
-        # add to random child
-        child_index = pytest.gen.randrange(0, total_num_children) # from 0 to total_num_children - 1
-        fake_child = fake_data['children'][child_index]
-        fake_connections = fake_child['connections']
-        fake_connection = {'target':fake_target, 'weight':fake_weight}
-        fake_connections.insert(pytest.gen.randint(0, len(fake_connections)), fake_connection)
-        expected_prints.append(f"{fake_child['name']} --> {fake_target}, {str(fake_weight)}")
-
-    mocker.patch(file_io.__name__ + '.open', return_value=fake_f)
-    mocker.patch(file_io.__name__ + '.json.load', return_value=fake_data)
-    mocker.patch(file_io.__name__ + '.print')
-
-    # Act
-    file_io.parse_associations_from_json(arg_filepath)
-
-    # Assert
-    assert file_io.open.call_count == 1
-    assert file_io.json.load.call_count == 1
-    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
-    assert file_io.print.call_count == total_num_connections
-    for i in range(total_num_connections):
-        assert file_io.print.call_args_list[i].args == (expected_prints[i],)
+    # Assert
+    assert file_io.open.call_count == 1
+    assert file_io.json.load.call_count == 1
+    assert file_io.json.load.call_args_list[0].args == (fake_file_iterator,)
+    assert file_io.print.call_count == total_num_connections
+    for i in range(total_num_connections):
+        assert file_io.print.call_args_list[i].args == (expected_prints[i],)
+
 
 # aggregate_results tests
+
 def test_file_io_aggregate_results_does_nothing_then_returns_None():
-    # Arrange
-    expected_result = None
+    # Arrange
+    expected_result = None
 
-    # Act
-    result = file_io.aggregate_results()
+    # Act
+    result = file_io.aggregate_results()
 
-    # Assert
-    assert result == expected_result
\ No newline at end of file
+    # Assert
+    assert result == expected_result
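Read together, the KeyError tests and the reverse-sort test above pin down the contract of parse_associations_from_json: open the file once, json.load it once, walk data["children"] reading each child's "name" and "connections" entries (letting KeyError escape for any missing key), and print one "name --> target, weight" line per connection, highest weight first. A minimal sketch of that contract; the real onair.src.util.file_io implementation may differ in its details:

import json


def parse_associations_from_json(filepath):
    # open and json.load are each called exactly once
    with open(filepath) as f:
        data = json.load(f)
    associations = []
    for child in data["children"]:  # missing keys raise KeyError, as the tests expect
        name = child["name"]
        for connection in child["connections"]:
            associations.append((name, connection["target"], connection["weight"]))
    # reverse sort by weight so the heaviest associations print first
    associations.sort(key=lambda association: association[2], reverse=True)
    for name, target, weight in associations:
        print(f"{name} --> {target}, {str(weight)}")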
diff --git a/test/onair/src/util/test_plugin_import.py b/test/onair/src/util/test_plugin_import.py
index 0e3155cb..c3f58570 100644
--- a/test/onair/src/util/test_plugin_import.py
+++ b/test/onair/src/util/test_plugin_import.py
@@ -13,6 +13,7 @@
 
 import onair.src.util.plugin_import as plugin_import
 
+
 def test_plugin_import_returns_empty_list_when_given_module_dict_is_empty():
     # Arrange
     arg_headers = MagicMock()
     arg_module_dict = {}
@@ -24,32 +25,40 @@ def test_plugin_import_returns_empty_list_when_given_module_dict_is_empty():
     # Assert
     assert result == []
 
-def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_no_init_and_not_already_in_sys(mocker):
+
+def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_no_init_and_not_already_in_sys(
+    mocker,
+):
     # Arrange
     arg_headers = 
MagicMock() fake_construct_name = MagicMock() fake_mod_name = MagicMock() fake_module_path = MagicMock() fake_full_path = MagicMock() - arg_module_dict = {fake_construct_name:fake_module_path} + arg_module_dict = {fake_construct_name: fake_module_path} fake_spec = MagicMock() fake_module = MagicMock() fake_plugin = MagicMock() fake_Plugin_instance = MagicMock() - mocker.patch.object(fake_module_path, 'endswith', - return_value=False) - mocker.patch(plugin_import.__name__ + '.os.path.basename', - return_value=fake_mod_name) - mocker.patch(plugin_import.__name__ + '.os.path.join', - return_value=fake_full_path) - mocker.patch(plugin_import.__name__ + '.importlib.util.spec_from_file_location', return_value=fake_spec) - mocker.patch(plugin_import.__name__ + '.importlib.util.module_from_spec', return_value=fake_module) - mocker.patch.object(fake_spec, 'loader.exec_module') + mocker.patch.object(fake_module_path, "endswith", return_value=False) + mocker.patch( + plugin_import.__name__ + ".os.path.basename", return_value=fake_mod_name + ) + mocker.patch(plugin_import.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch( + plugin_import.__name__ + ".importlib.util.spec_from_file_location", + return_value=fake_spec, + ) + mocker.patch( + plugin_import.__name__ + ".importlib.util.module_from_spec", + return_value=fake_module, + ) + mocker.patch.object(fake_spec, "loader.exec_module") mocker.patch.dict(plugin_import.sys.modules) - import_mock = mocker.patch('builtins.__import__', return_value=fake_plugin) - mocker.patch.object(fake_plugin, 'Plugin', return_value=fake_Plugin_instance) + import_mock = mocker.patch("builtins.__import__", return_value=fake_plugin) + mocker.patch.object(fake_plugin, "Plugin", return_value=fake_Plugin_instance) # Act result = plugin_import.import_plugins(arg_headers, arg_module_dict) @@ -59,52 +68,69 @@ def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_ # Therefore import_mock is checked first then stopped, so other items failures output correctly # When this test fails because of INTERNALERROR the problem is with import_mock assert import_mock.call_count == 1 - assert import_mock.call_args_list[0].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[0].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) + assert import_mock.call_args_list[0].args == ( + f"{fake_mod_name}.{fake_mod_name}_plugin", + ) + assert import_mock.call_args_list[0].kwargs == ( + {"fromlist": [f"{fake_mod_name}_plugin"]} + ) # # Without the stop of import_mock any other fails will also cause INTERNALERROR mocker.stop(import_mock) assert plugin_import.os.path.basename.call_count == 1 - assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path, ) + assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path,) assert plugin_import.importlib.util.spec_from_file_location.call_count == 1 - assert plugin_import.importlib.util.spec_from_file_location.call_args_list[0].args == (fake_mod_name, fake_full_path) + assert plugin_import.importlib.util.spec_from_file_location.call_args_list[ + 0 + ].args == (fake_mod_name, fake_full_path) assert plugin_import.importlib.util.module_from_spec.call_count == 1 - assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == (fake_spec,) + assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == ( + fake_spec, + ) assert fake_spec.loader.exec_module.call_count == 1 - assert 
fake_spec.loader.exec_module.call_args_list[0].args == (fake_module, ) + assert fake_spec.loader.exec_module.call_args_list[0].args == (fake_module,) assert fake_mod_name in plugin_import.sys.modules assert plugin_import.sys.modules[fake_mod_name] == fake_module assert result == [fake_Plugin_instance] -def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_has_init_and_not_already_in_sys(mocker): + +def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_has_init_and_not_already_in_sys( + mocker, +): # Arrange arg_headers = MagicMock() fake_construct_name = MagicMock() fake_mod_name = MagicMock() fake_pathing = [] - for _ in range(pytest.gen.randint(1, 5)): # 1-5 arbitrary length + for _ in range(pytest.gen.randint(1, 5)): # 1-5 arbitrary length fake_pathing.append(str(MagicMock())) expected_true_path = os.path.join(*fake_pathing) fake_pathing.append("__init__.py") fake_module_path = os.path.join(*fake_pathing) fake_full_path = MagicMock() - arg_module_dict = {fake_construct_name:fake_module_path} + arg_module_dict = {fake_construct_name: fake_module_path} fake_spec = MagicMock() fake_module = MagicMock() fake_plugin = MagicMock() fake_Plugin_instance = MagicMock() - mocker.patch(plugin_import.__name__ + '.os.path.basename', - return_value=fake_mod_name) - mocker.patch(plugin_import.__name__ + '.os.path.join', - return_value=fake_full_path) - mocker.patch(plugin_import.__name__ + '.importlib.util.spec_from_file_location', return_value=fake_spec) - mocker.patch(plugin_import.__name__ + '.importlib.util.module_from_spec', return_value=fake_module) - mocker.patch.object(fake_spec, 'loader.exec_module') + mocker.patch( + plugin_import.__name__ + ".os.path.basename", return_value=fake_mod_name + ) + mocker.patch(plugin_import.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch( + plugin_import.__name__ + ".importlib.util.spec_from_file_location", + return_value=fake_spec, + ) + mocker.patch( + plugin_import.__name__ + ".importlib.util.module_from_spec", + return_value=fake_module, + ) + mocker.patch.object(fake_spec, "loader.exec_module") mocker.patch.dict(plugin_import.sys.modules) - import_mock = mocker.patch('builtins.__import__', return_value=fake_plugin) - mocker.patch.object(fake_plugin, 'Plugin', return_value=fake_Plugin_instance) + import_mock = mocker.patch("builtins.__import__", return_value=fake_plugin) + mocker.patch.object(fake_plugin, "Plugin", return_value=fake_Plugin_instance) # Act result = plugin_import.import_plugins(arg_headers, arg_module_dict) @@ -114,45 +140,57 @@ def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_ # Therefore import_mock is checked first then stopped, so other items failures output correctly # When this test fails because of INTERNALERROR the problem is with import_mock assert import_mock.call_count == 1 - assert import_mock.call_args_list[0].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[0].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) + assert import_mock.call_args_list[0].args == ( + f"{fake_mod_name}.{fake_mod_name}_plugin", + ) + assert import_mock.call_args_list[0].kwargs == ( + {"fromlist": [f"{fake_mod_name}_plugin"]} + ) # # Without the stop of import_mock any other fails will also cause INTERNALERROR mocker.stop(import_mock) assert plugin_import.os.path.basename.call_count == 1 - assert plugin_import.os.path.basename.call_args_list[0].args == 
(expected_true_path, ) + assert plugin_import.os.path.basename.call_args_list[0].args == ( + expected_true_path, + ) assert plugin_import.importlib.util.spec_from_file_location.call_count == 1 - assert plugin_import.importlib.util.spec_from_file_location.call_args_list[0].args == (fake_mod_name, fake_full_path) + assert plugin_import.importlib.util.spec_from_file_location.call_args_list[ + 0 + ].args == (fake_mod_name, fake_full_path) assert plugin_import.importlib.util.module_from_spec.call_count == 1 - assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == (fake_spec,) + assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == ( + fake_spec, + ) assert fake_spec.loader.exec_module.call_count == 1 - assert fake_spec.loader.exec_module.call_args_list[0].args == (fake_module, ) + assert fake_spec.loader.exec_module.call_args_list[0].args == (fake_module,) assert fake_mod_name in plugin_import.sys.modules assert plugin_import.sys.modules[fake_mod_name] == fake_module assert result == [fake_Plugin_instance] -def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_no_init_and_exists_in_sys(mocker): + +def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_no_init_and_exists_in_sys( + mocker, +): # Arrange arg_headers = MagicMock() fake_construct_name = MagicMock() fake_mod_name = MagicMock() fake_module_path = MagicMock() fake_full_path = MagicMock() - arg_module_dict = {fake_construct_name:fake_module_path} + arg_module_dict = {fake_construct_name: fake_module_path} fake_plugin = MagicMock() fake_Plugin_instance = MagicMock() - mocker.patch.object(fake_module_path, 'endswith', - return_value=False) - mocker.patch(plugin_import.__name__ + '.os.path.basename', - return_value=fake_mod_name) - mocker.patch(plugin_import.__name__ + '.os.path.join', - return_value=fake_full_path) - mocker.patch(plugin_import.__name__ + '.importlib.util.spec_from_file_location') - mocker.patch.dict(plugin_import.sys.modules, {fake_mod_name:None}) - import_mock = mocker.patch('builtins.__import__', return_value=fake_plugin) - mocker.patch.object(fake_plugin, 'Plugin', return_value=fake_Plugin_instance) + mocker.patch.object(fake_module_path, "endswith", return_value=False) + mocker.patch( + plugin_import.__name__ + ".os.path.basename", return_value=fake_mod_name + ) + mocker.patch(plugin_import.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch(plugin_import.__name__ + ".importlib.util.spec_from_file_location") + mocker.patch.dict(plugin_import.sys.modules, {fake_mod_name: None}) + import_mock = mocker.patch("builtins.__import__", return_value=fake_plugin) + mocker.patch.object(fake_plugin, "Plugin", return_value=fake_Plugin_instance) # Act result = plugin_import.import_plugins(arg_headers, arg_module_dict) @@ -162,43 +200,50 @@ def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_ # Therefore import_mock is checked first then stopped, so other items failures output correctly # When this test fails because of INTERNALERROR the problem is with import_mock assert import_mock.call_count == 1 - assert import_mock.call_args_list[0].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[0].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) + assert import_mock.call_args_list[0].args == ( + f"{fake_mod_name}.{fake_mod_name}_plugin", + ) + assert import_mock.call_args_list[0].kwargs == ( + {"fromlist": 
[f"{fake_mod_name}_plugin"]} + ) # # Without the stop of import_mock any other fails will also cause INTERNALERROR mocker.stop(import_mock) assert plugin_import.os.path.basename.call_count == 1 - assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path, ) + assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path,) assert plugin_import.importlib.util.spec_from_file_location.call_count == 0 assert fake_mod_name in plugin_import.sys.modules assert plugin_import.sys.modules[fake_mod_name] == None assert result == [fake_Plugin_instance] -def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_has_init_and_exists_in_sys(mocker): + +def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_one_key_value_pair_has_init_and_exists_in_sys( + mocker, +): # Arrange arg_headers = MagicMock() fake_construct_name = MagicMock() fake_mod_name = MagicMock() fake_pathing = [] - for _ in range(pytest.gen.randint(1, 5)): # 1-5 arbitrary length + for _ in range(pytest.gen.randint(1, 5)): # 1-5 arbitrary length fake_pathing.append(str(MagicMock())) expected_true_path = os.path.join(*fake_pathing) fake_pathing.append("__init__.py") fake_module_path = os.path.join(*fake_pathing) fake_full_path = MagicMock() - arg_module_dict = {fake_construct_name:fake_module_path} + arg_module_dict = {fake_construct_name: fake_module_path} fake_plugin = MagicMock() fake_Plugin_instance = MagicMock() - mocker.patch(plugin_import.__name__ + '.os.path.basename', - return_value=fake_mod_name) - mocker.patch(plugin_import.__name__ + '.os.path.join', - return_value=fake_full_path) - mocker.patch(plugin_import.__name__ + '.importlib.util.spec_from_file_location') - mocker.patch.dict(plugin_import.sys.modules, {fake_mod_name:None}) - import_mock = mocker.patch('builtins.__import__', return_value=fake_plugin) - mocker.patch.object(fake_plugin, 'Plugin', return_value=fake_Plugin_instance) + mocker.patch( + plugin_import.__name__ + ".os.path.basename", return_value=fake_mod_name + ) + mocker.patch(plugin_import.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch(plugin_import.__name__ + ".importlib.util.spec_from_file_location") + mocker.patch.dict(plugin_import.sys.modules, {fake_mod_name: None}) + import_mock = mocker.patch("builtins.__import__", return_value=fake_plugin) + mocker.patch.object(fake_plugin, "Plugin", return_value=fake_Plugin_instance) # Act result = plugin_import.import_plugins(arg_headers, arg_module_dict) @@ -208,19 +253,28 @@ def test_plugin_import_returns_single_item_list_when_given_module_dict_contains_ # Therefore import_mock is checked first then stopped, so other items failures output correctly # When this test fails because of INTERNALERROR the problem is with import_mock assert import_mock.call_count == 1 - assert import_mock.call_args_list[0].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[0].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) + assert import_mock.call_args_list[0].args == ( + f"{fake_mod_name}.{fake_mod_name}_plugin", + ) + assert import_mock.call_args_list[0].kwargs == ( + {"fromlist": [f"{fake_mod_name}_plugin"]} + ) # # Without the stop of import_mock any other fails will also cause INTERNALERROR mocker.stop(import_mock) assert plugin_import.os.path.basename.call_count == 1 - assert plugin_import.os.path.basename.call_args_list[0].args == (expected_true_path, ) + assert 
plugin_import.os.path.basename.call_args_list[0].args == ( + expected_true_path, + ) assert plugin_import.importlib.util.spec_from_file_location.call_count == 0 assert fake_mod_name in plugin_import.sys.modules assert plugin_import.sys.modules[fake_mod_name] == None assert result == [fake_Plugin_instance] -def test_plugin_import_returns_two_item_list_when_given_module_dict_contains_two_key_value_pairs_that_use_same_module(mocker): + +def test_plugin_import_returns_two_item_list_when_given_module_dict_contains_two_key_value_pairs_that_use_same_module( + mocker, +): # Arrange arg_headers = MagicMock() fake_construct_name_1 = MagicMock() @@ -228,8 +282,10 @@ def test_plugin_import_returns_two_item_list_when_given_module_dict_contains_two fake_mod_name = MagicMock() fake_module_path = MagicMock() fake_full_path = MagicMock() - arg_module_dict = {fake_construct_name_1:fake_module_path, - fake_construct_name_2:fake_module_path} + arg_module_dict = { + fake_construct_name_1: fake_module_path, + fake_construct_name_2: fake_module_path, + } fake_spec = MagicMock() fake_module = MagicMock() @@ -237,21 +293,27 @@ def test_plugin_import_returns_two_item_list_when_given_module_dict_contains_two fake_Plugin_instance_1 = MagicMock() fake_Plugin_instance_2 = MagicMock() - mocker.patch.object(fake_module_path, 'endswith', - return_value=False) - mocker.patch(plugin_import.__name__ + '.os.path.basename', - return_value=fake_mod_name) - mocker.patch(plugin_import.__name__ + '.os.path.join', - return_value=fake_full_path) - mocker.patch(plugin_import.__name__ + '.importlib.util.spec_from_file_location', return_value=fake_spec) - mocker.patch(plugin_import.__name__ + '.importlib.util.module_from_spec', return_value=fake_module) - mocker.patch.object(fake_spec, 'loader.exec_module') + mocker.patch.object(fake_module_path, "endswith", return_value=False) + mocker.patch( + plugin_import.__name__ + ".os.path.basename", return_value=fake_mod_name + ) + mocker.patch(plugin_import.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch( + plugin_import.__name__ + ".importlib.util.spec_from_file_location", + return_value=fake_spec, + ) + mocker.patch( + plugin_import.__name__ + ".importlib.util.module_from_spec", + return_value=fake_module, + ) + mocker.patch.object(fake_spec, "loader.exec_module") mocker.patch.dict(plugin_import.sys.modules) - import_mock = mocker.patch('builtins.__import__', return_value=fake_plugin) - mocker.patch.object(fake_plugin, - 'Plugin', - side_effect=[fake_Plugin_instance_1, - fake_Plugin_instance_2]) + import_mock = mocker.patch("builtins.__import__", return_value=fake_plugin) + mocker.patch.object( + fake_plugin, + "Plugin", + side_effect=[fake_Plugin_instance_1, fake_Plugin_instance_2], + ) # Act result = plugin_import.import_plugins(arg_headers, arg_module_dict) @@ -261,21 +323,33 @@ def test_plugin_import_returns_two_item_list_when_given_module_dict_contains_two # Therefore import_mock is checked first then stopped, so other items failures output correctly # When this test fails because of INTERNALERROR the problem is with import_mock assert import_mock.call_count == 2 - assert import_mock.call_args_list[0].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[0].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) - assert import_mock.call_args_list[1].args == (f'{fake_mod_name}.{fake_mod_name}_plugin', ) - assert import_mock.call_args_list[1].kwargs == ({'fromlist': [f"{fake_mod_name}_plugin"]}) + assert 
import_mock.call_args_list[0].args == (
+        f"{fake_mod_name}.{fake_mod_name}_plugin",
+    )
+    assert import_mock.call_args_list[0].kwargs == (
+        {"fromlist": [f"{fake_mod_name}_plugin"]}
+    )
+    assert import_mock.call_args_list[1].args == (
+        f"{fake_mod_name}.{fake_mod_name}_plugin",
+    )
+    assert import_mock.call_args_list[1].kwargs == (
+        {"fromlist": [f"{fake_mod_name}_plugin"]}
+    )
     # # Without the stop of import_mock any other fails will also cause INTERNALERROR
     mocker.stop(import_mock)
     assert plugin_import.os.path.basename.call_count == 2
-    assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path, )
+    assert plugin_import.os.path.basename.call_args_list[0].args == (fake_module_path,)
     assert plugin_import.importlib.util.spec_from_file_location.call_count == 1
-    assert plugin_import.importlib.util.spec_from_file_location.call_args_list[0].args == (fake_mod_name, fake_full_path)
+    assert plugin_import.importlib.util.spec_from_file_location.call_args_list[
+        0
+    ].args == (fake_mod_name, fake_full_path)
     assert plugin_import.importlib.util.module_from_spec.call_count == 1
-    assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == (fake_spec,)
+    assert plugin_import.importlib.util.module_from_spec.call_args_list[0].args == (
+        fake_spec,
+    )
     assert fake_spec.loader.exec_module.call_count == 1
-    assert fake_spec.loader.exec_module.call_args_list[0].args == (fake_module, )
+    assert fake_spec.loader.exec_module.call_args_list[0].args == (fake_module,)
     assert fake_mod_name in plugin_import.sys.modules
     assert plugin_import.sys.modules[fake_mod_name] == fake_module
    assert result == [fake_Plugin_instance_1, fake_Plugin_instance_2]
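One recurring subtlety in the test_plugin_import tests above deserves spelling out: while builtins.__import__ is patched, pytest cannot import its own reporting machinery, so any assertion failure escalates into an INTERNALERROR instead of a readable failure. The tests therefore verify the import mock first and release it with mocker.stop() before asserting anything else. A self-contained sketch of the pattern (the test name and fake module are illustrative, not part of the suite above):

from unittest.mock import MagicMock


def test_import_mock_is_verified_then_stopped_before_other_asserts(mocker):
    fake_module = MagicMock()
    import_mock = mocker.patch("builtins.__import__", return_value=fake_module)

    # exercise code that performs a dynamic import
    result = __import__("fake_mod.fake_mod_plugin", fromlist=["fake_mod_plugin"])

    # 1) assert on the import mock while it is still active; a failure here
    #    is the one case that cannot be reported cleanly
    assert import_mock.call_count == 1
    # 2) restore builtins.__import__ so pytest can report any later failure
    mocker.stop(import_mock)
    assert result is fake_module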
diff --git a/test/onair/src/util/test_print_io.py b/test/onair/src/util/test_print_io.py
index afed28f7..8af9abd5 100644
--- a/test/onair/src/util/test_print_io.py
+++ b/test/onair/src/util/test_print_io.py
@@ -12,549 +12,659 @@
 
 import onair.src.util.print_io as print_io
 
+
 # bcolors tests
 def test_print_io_bcolors_HEADER_is_expected_value():
-    assert print_io.bcolors.HEADER == '\033[95m'
+    assert print_io.bcolors.HEADER == "\033[95m"
+
 
 def test_print_io_bcolors_OKBLUE_is_expected_value():
-    assert print_io.bcolors.OKBLUE == '\033[94m'
+    assert print_io.bcolors.OKBLUE == "\033[94m"
+
 
 def test_print_io_bcolors_OKGREEN_is_expected_value():
-    assert print_io.bcolors.OKGREEN == '\033[92m'
+    assert print_io.bcolors.OKGREEN == "\033[92m"
+
 
 def test_print_io_bcolors_WARNING_is_expected_value():
-    assert print_io.bcolors.WARNING == '\033[93m'
+    assert print_io.bcolors.WARNING == "\033[93m"
+
 
 def test_print_io_bcolors_FAIL_is_expected_value():
-    assert print_io.bcolors.FAIL == '\033[91m'
+    assert print_io.bcolors.FAIL == "\033[91m"
+
 
 def test_print_io_bcolors_ENDC_is_expected_value():
-    assert print_io.bcolors.ENDC == '\033[0m'
+    assert print_io.bcolors.ENDC == "\033[0m"
+
 
 def test_print_io_bcolors_BOLD_is_expected_value():
-    assert print_io.bcolors.BOLD == '\033[1m'
+    assert print_io.bcolors.BOLD == "\033[1m"
+
 
 def test_print_io_bcolors_UNDERLINE_is_expected_value():
-    assert print_io.bcolors.UNDERLINE == '\033[4m'
+    assert print_io.bcolors.UNDERLINE == "\033[4m"
 
 
 # Globals tests
 def test_print_io_scolors_HEADER_is_set_to_bcolors_HEADER():
-    assert print_io.scolors['HEADER'] == print_io.bcolors.HEADER
+    assert print_io.scolors["HEADER"] == print_io.bcolors.HEADER
+
 
 def test_print_io_scolors_OKBLUE_is_set_to_bcolors_OKBLUE():
-    assert print_io.scolors['OKBLUE'] == print_io.bcolors.OKBLUE
+    assert print_io.scolors["OKBLUE"] == print_io.bcolors.OKBLUE
+
 
 def test_print_io_scolors_OKGREEN_is_set_to_bcolors_OKGREEN():
-    assert print_io.scolors['OKGREEN'] == print_io.bcolors.OKGREEN
+    assert print_io.scolors["OKGREEN"] == print_io.bcolors.OKGREEN
+
 
 def test_print_io_scolors_WARNING_is_set_to_bcolors_WARNING():
-    assert print_io.scolors['WARNING'] == print_io.bcolors.WARNING
+    assert print_io.scolors["WARNING"] == print_io.bcolors.WARNING
+
 
 def test_print_io_scolors_FAIL_is_set_to_bcolors_FAIL():
-    assert print_io.scolors['FAIL'] == print_io.bcolors.FAIL
+    assert print_io.scolors["FAIL"] == print_io.bcolors.FAIL
+
 
 def test_print_io_scolors_ENDC_is_set_to_bcolors_ENDC():
-    assert print_io.scolors['ENDC'] == print_io.bcolors.ENDC
+    assert print_io.scolors["ENDC"] == print_io.bcolors.ENDC
+
 
 def test_print_io_scolors_BOLD_is_set_to_bcolors_BOLD():
-    assert print_io.scolors['BOLD'] == print_io.bcolors.BOLD
+    assert print_io.scolors["BOLD"] == print_io.bcolors.BOLD
+
 
 def test_print_io_scolors_UNDERLINE_is_set_to_bcolors_UNDERLINE():
-    assert print_io.scolors['UNDERLINE'] == print_io.bcolors.UNDERLINE
+    assert print_io.scolors["UNDERLINE"] == print_io.bcolors.UNDERLINE
+
 
 def test_print_io_status_colors_GREEN_is_set_to_bcolors_OKGREEN():
-    assert print_io.status_colors['GREEN'] == print_io.bcolors.OKGREEN
+    assert print_io.status_colors["GREEN"] == print_io.bcolors.OKGREEN
+
 
 def test_print_io_status_colors_YELLOW_is_set_to_bcolors_WARNING():
-    assert print_io.status_colors['YELLOW'] == print_io.bcolors.WARNING
+    assert print_io.status_colors["YELLOW"] == print_io.bcolors.WARNING
+
 
 def test_print_io_status_colors_RED_is_set_to_bcolors_FAIL():
-    assert print_io.status_colors['RED'] == print_io.bcolors.FAIL
+    assert print_io.status_colors["RED"] == print_io.bcolors.FAIL
+
 
 def test_print_io_status_colors_3_dashes_is_set_to_bcolors_OKBLUE():
-    assert print_io.status_colors['---'] == print_io.bcolors.OKBLUE
+    assert print_io.status_colors["---"] == print_io.bcolors.OKBLUE
 
 
 # print_sim_header tests
 def test_print_io_print_sim_header_prints_expected_strings(mocker):
-    # Arrange
-    expected_print = []
-    expected_print.append(print_io.bcolors.HEADER + \
-                          print_io.bcolors.BOLD +\
-                          "\n***************************************************")
-    expected_print.append("************ SIMULATION STARTED ************")
-    expected_print.append("***************************************************" + \
-                          print_io.bcolors.ENDC)
+    # Arrange
+    expected_print = []
+    expected_print.append(
+        print_io.bcolors.HEADER
+        + print_io.bcolors.BOLD
+        + "\n***************************************************"
+    )
+    expected_print.append("************ SIMULATION STARTED ************")
+    expected_print.append(
+        "***************************************************" + print_io.bcolors.ENDC
+    )
 
-    mocker.patch(print_io.__name__ + '.print')
+    mocker.patch(print_io.__name__ + ".print")
 
-    # Act
-    print_io.print_sim_header()
+    # Act
+    print_io.print_sim_header()
 
-    # Assert
-    for i in range(3):
-        print_io.print.call_args_list[i].args == (expected_print[i], )
+    # Assert
+    for i in range(3):
+        assert print_io.print.call_args_list[i].args == (expected_print[i],)
 
 
 # print_sim_step tests
 def test_print_io_print_sim_step_inserts_given_step_num_into_text(mocker):
-    # Arrange
-    arg_step_num = pytest.gen.randint(1, 100) # arbitrary from 1 to 100
-    expected_print = print_io.bcolors.HEADER + \
-                     print_io.bcolors.BOLD + \
-                     f"\n--------------------- STEP {arg_step_num}" + \
-                     " ---------------------\n" + \
-                     print_io.bcolors.ENDC
+    # Arrange
+    arg_step_num = pytest.gen.randint(1, 100)  # 
arbitrary from 1 to 100 + expected_print = ( + print_io.bcolors.HEADER + + print_io.bcolors.BOLD + + f"\n--------------------- STEP {arg_step_num}" + + " ---------------------\n" + + print_io.bcolors.ENDC + ) - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.print_sim_step(arg_step_num) + # Act + print_io.print_sim_step(arg_step_num) - # Assert - assert print_io.print.call_args_list[0].args == (expected_print, ) + # Assert + assert print_io.print.call_args_list[0].args == (expected_print,) # print_separator tests def test_print_io_print_separator_uses_bcolors_HEADER_as_default_color_value(mocker): - # Arrange - expected_color = print_io.bcolors.HEADER - expected_print = expected_color + \ - print_io.bcolors.BOLD + \ - "\n------------------------------------------------\n" + \ - print_io.bcolors.ENDC + # Arrange + expected_color = print_io.bcolors.HEADER + expected_print = ( + expected_color + + print_io.bcolors.BOLD + + "\n------------------------------------------------\n" + + print_io.bcolors.ENDC + ) - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.print_separator() + # Act + print_io.print_separator() - # Assert - assert print_io.print.call_args_list[0].args == (expected_print, ) + # Assert + assert print_io.print.call_args_list[0].args == (expected_print,) -def test_print_io_print_separator_prints_whatever_is_passed_in_as_color_at_start_of_line(mocker): - # Arrange - arg_color = MagicMock() - expected_print = arg_color + \ - print_io.bcolors.BOLD + \ - "\n------------------------------------------------\n" + \ - print_io.bcolors.ENDC +def test_print_io_print_separator_prints_whatever_is_passed_in_as_color_at_start_of_line( + mocker, +): + # Arrange + arg_color = MagicMock() - mocker.patch(print_io.__name__ + '.print') + expected_print = ( + arg_color + + print_io.bcolors.BOLD + + "\n------------------------------------------------\n" + + print_io.bcolors.ENDC + ) - # Act - print_io.print_separator(arg_color) + mocker.patch(print_io.__name__ + ".print") - # Assert - assert print_io.print.call_count == 1 - assert print_io.print.call_args_list[0].args == (expected_print, ) + # Act + print_io.print_separator(arg_color) + + # Assert + assert print_io.print.call_count == 1 + assert print_io.print.call_args_list[0].args == (expected_print,) # update_header tests -def test_print_io_update_header_prints_message_with_bcolors_BOLD_at_start_when_no_clr_arg_given(mocker): - # Arrange - arg_msg = MagicMock() +def test_print_io_update_header_prints_message_with_bcolors_BOLD_at_start_when_no_clr_arg_given( + mocker, +): + # Arrange + arg_msg = MagicMock() - expected_clr = print_io.bcolors.BOLD - expected_print = expected_clr + \ - "--------- " + arg_msg + " update" + \ - print_io.bcolors.ENDC + expected_clr = print_io.bcolors.BOLD + expected_print = ( + expected_clr + "--------- " + arg_msg + " update" + print_io.bcolors.ENDC + ) - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.update_header(arg_msg) + # Act + print_io.update_header(arg_msg) - # Assert - assert print_io.print.call_count == 1 - assert print_io.print.call_args_list[0].args == (expected_print, ) + # Assert + assert print_io.print.call_count == 1 + assert print_io.print.call_args_list[0].args == (expected_print,) -def test_print_io_update_header_prints_message_starting_with_whatever_is_given_as_clr(mocker): - # Arrange - arg_msg = MagicMock() - arg_clr = 
MagicMock() - expected_print = arg_clr + \ - "--------- " + arg_msg + " update" + \ - print_io.bcolors.ENDC +def test_print_io_update_header_prints_message_starting_with_whatever_is_given_as_clr( + mocker, +): + # Arrange + arg_msg = MagicMock() + arg_clr = MagicMock() + + expected_print = ( + arg_clr + "--------- " + arg_msg + " update" + print_io.bcolors.ENDC + ) - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.update_header(arg_msg, arg_clr) + # Act + print_io.update_header(arg_msg, arg_clr) - # Assert - assert print_io.print.call_count == 1 - assert print_io.print.call_args_list[0].args == (expected_print, ) + # Assert + assert print_io.print.call_count == 1 + assert print_io.print.call_args_list[0].args == (expected_print,) # print_msg tests -def test_print_io_print_msg_prints_message_starting_only_with_scolor_HEADER_when_no_clrs_arg_given(mocker): +def test_print_io_print_msg_prints_message_starting_only_with_scolor_HEADER_when_no_clrs_arg_given( + mocker, +): # Arrange - arg_msg = MagicMock() + arg_msg = MagicMock() - expected_scolor = print_io.scolors['HEADER'] - expected_print = [] - expected_print.append(expected_scolor) - expected_print.append("---- " + arg_msg + print_io.bcolors.ENDC) + expected_scolor = print_io.scolors["HEADER"] + expected_print = [] + expected_print.append(expected_scolor) + expected_print.append("---- " + arg_msg + print_io.bcolors.ENDC) - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.print_msg(arg_msg) + # Act + print_io.print_msg(arg_msg) + + # Assert + assert print_io.print.call_count == 2 + for i in range(2): + assert print_io.print.call_args_list[i].args == (expected_print[i],) - # Assert - assert print_io.print.call_count == 2 - for i in range(2): - assert print_io.print.call_args_list[i].args == (expected_print[i], ) def test_print_io_print_msg_raises_KeyError_when_given_clrs_item_not_in_scolors(mocker): # Arrange - arg_msg = MagicMock() - arg_clrs = ['THIS-WILL-THROW-KEYERROR'] + arg_msg = MagicMock() + arg_clrs = ["THIS-WILL-THROW-KEYERROR"] - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - with pytest.raises(KeyError) as e_info: - print_io.print_msg(arg_msg, arg_clrs) + # Act + with pytest.raises(KeyError) as e_info: + print_io.print_msg(arg_msg, arg_clrs) + + # Assert + assert str(e_info.value) == "'THIS-WILL-THROW-KEYERROR'" + assert print_io.print.call_count == 0 - # Assert - assert str(e_info.value) == "'THIS-WILL-THROW-KEYERROR'" - assert print_io.print.call_count == 0 def test_print_io_print_msg_prints_only_given_msg_when_given_clrs_is_empty(mocker): # Arrange - arg_msg = MagicMock() - arg_clrs = [] + arg_msg = MagicMock() + arg_clrs = [] - mocker.patch(print_io.__name__ + '.print') + mocker.patch(print_io.__name__ + ".print") - # Act - print_io.print_msg(arg_msg, arg_clrs) + # Act + print_io.print_msg(arg_msg, arg_clrs) + + # Assert + assert print_io.print.call_count == 1 + assert print_io.print.call_args_list[0].args == ( + "---- " + arg_msg + print_io.bcolors.ENDC, + ) - # Assert - assert print_io.print.call_count == 1 - assert print_io.print.call_args_list[0].args == ("---- " + arg_msg + print_io.bcolors.ENDC, ) def test_print_io_print_msg_prints_all_scolors_given_in_clrs(mocker): # Arrange - arg_msg = MagicMock() - arg_clrs = list(print_io.scolors.keys()) - pytest.gen.shuffle(arg_clrs) # change up the order to show it does not matter + arg_msg = MagicMock() + arg_clrs 
= list(print_io.scolors.keys())
+    pytest.gen.shuffle(arg_clrs)  # change up the order to show it does not matter
+
+    mocker.patch(print_io.__name__ + ".print")
+
+    # Act
+    print_io.print_msg(arg_msg, arg_clrs)
 
-    mocker.patch(print_io.__name__ + '.print')
+    # Assert
+    assert print_io.print.call_count == len(print_io.scolors.keys()) + 1
+    for i in range(len(arg_clrs)):
+        assert print_io.print.call_args_list[i].args == (print_io.scolors[arg_clrs[i]],)
+    assert print_io.print.call_args_list[i + 1].args == (
+        "---- " + arg_msg + print_io.bcolors.ENDC,
+    )
 
-    # Act
-    print_io.print_msg(arg_msg, arg_clrs)
 
-    # Assert
-    assert print_io.print.call_count == len(print_io.scolors.keys()) + 1
-    for i in range(len(arg_clrs)):
-        assert print_io.print.call_args_list[i].args == (print_io.scolors[arg_clrs[i]], )
-        assert print_io.print.call_args_list[i + 1].args == ("---- " + arg_msg + print_io.bcolors.ENDC, )
 
+# print_system_status tests
+def test_print_io_print_mission_status_only_prints_agent_formatted_status_when_data_not_given(
+    mocker,
+):
+    # Arrange
+    arg_agent = MagicMock()
 
-#print_mission_status
-def test_print_io_print_mission_status_only_prints_agent_formatted_status_when_data_not_given(mocker):
-    # Arrange
-    arg_agent = MagicMock()
+    fake_mission_status = MagicMock()
+    fake_status = MagicMock()
 
-    fake_mission_status = MagicMock()
-    fake_status = MagicMock()
+    expected_print = "INTERPRETED SYSTEM STATUS: " + str(fake_status)
 
-    expected_print = "INTERPRETED SYSTEM STATUS: " + str(fake_status)
+    arg_agent.mission_status = fake_mission_status
+    mocker.patch(print_io.__name__ + ".format_status", return_value=fake_status)
+    mocker.patch(print_io.__name__ + ".print")
 
-    arg_agent.mission_status = fake_mission_status
-    mocker.patch(print_io.__name__ + '.format_status', return_value=fake_status)
-    mocker.patch(print_io.__name__ + '.print')
+    # Act
+    print_io.print_system_status(arg_agent)
 
-    # Act
-    print_io.print_system_status(arg_agent)
+    # Assert
+    assert print_io.format_status.call_count == 1
+    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
+    assert print_io.print.call_count == 1
+    assert print_io.print.call_args_list[0].args == (expected_print,)
 
-    # Assert
-    assert print_io.format_status.call_count == 1
-    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
-    assert print_io.print.call_count == 1
-    assert print_io.print.call_args_list[0].args == (expected_print, )
 
-def test_print_io_print_mission_status_only_prints_agent_formatted_status_when_data_given_is_None(mocker):
-    # Arrange
-    arg_agent = MagicMock()
-    arg_data = None
+
+def test_print_io_print_mission_status_only_prints_agent_formatted_status_when_data_given_is_None(
+    mocker,
+):
+    # Arrange
+    arg_agent = MagicMock()
+    arg_data = None
 
-    fake_mission_status = MagicMock()
-    fake_status = MagicMock()
+    fake_mission_status = MagicMock()
+    fake_status = MagicMock()
 
-    expected_print = "INTERPRETED SYSTEM STATUS: " + str(fake_status)
+    expected_print = "INTERPRETED SYSTEM STATUS: " + str(fake_status)
 
-    arg_agent.mission_status = fake_mission_status
-    mocker.patch(print_io.__name__ + '.format_status', return_value=fake_status)
-    mocker.patch(print_io.__name__ + '.print')
+    arg_agent.mission_status = fake_mission_status
+    mocker.patch(print_io.__name__ + ".format_status", return_value=fake_status)
+    mocker.patch(print_io.__name__ + ".print")
 
-    # Act
-    print_io.print_system_status(arg_agent, arg_data)
+    # Act
+    print_io.print_system_status(arg_agent, arg_data)
 
+    # Assert
+    assert print_io.format_status.call_count == 1
+    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
+    assert print_io.print.call_count == 1
+    assert print_io.print.call_args_list[0].args == (expected_print,)
 
-    # Assert
-    assert print_io.format_status.call_count == 1
-    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
-    assert print_io.print.call_count == 1
-    assert print_io.print.call_args_list[0].args == (expected_print, )
 
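Between them, the print_system_status tests (the two above and the data-given case that follows) pin down a single branch: a "CURRENT DATA" line is printed only when data is supplied, and the interpreted status line, built from the module's own format_status helper that these tests patch, is always printed. A minimal sketch of the behavior being asserted; the real onair.src.util.print_io implementation may differ in its details:

def print_system_status(agent, data=None):
    # printed only when the caller supplies telemetry data
    if data is not None:
        print("CURRENT DATA: " + str(data))
    # always printed, using the module-level format_status helper
    print("INTERPRETED SYSTEM STATUS: " + str(format_status(agent.mission_status)))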
-def test_print_io_print_mission_status_only_prints_agent_formatted_status_when_data_given_is_None(mocker):
-    # Arrange
-    arg_agent = MagicMock()
-    arg_data = MagicMock()
+
+def test_print_io_print_mission_status_prints_data_and_agent_formatted_status_when_data_given(
+    mocker,
+):
+    # Arrange
+    arg_agent = MagicMock()
+    arg_data = MagicMock()
 
-    fake_mission_status = MagicMock()
-    fake_status = MagicMock()
+    fake_mission_status = MagicMock()
+    fake_status = MagicMock()
 
-    expected_print = []
-    expected_print.append("CURRENT DATA: " + str(arg_data))
-    expected_print.append("INTERPRETED SYSTEM STATUS: " + str(fake_status))
+    expected_print = []
+    expected_print.append("CURRENT DATA: " + str(arg_data))
+    expected_print.append("INTERPRETED SYSTEM STATUS: " + str(fake_status))
 
-    arg_agent.mission_status = fake_mission_status
-    mocker.patch(print_io.__name__ + '.format_status', return_value=fake_status)
-    mocker.patch(print_io.__name__ + '.print')
+    arg_agent.mission_status = fake_mission_status
+    mocker.patch(print_io.__name__ + ".format_status", return_value=fake_status)
+    mocker.patch(print_io.__name__ + ".print")
 
-    # Act
-    print_io.print_system_status(arg_agent, arg_data)
+    # Act
+    print_io.print_system_status(arg_agent, arg_data)
 
-    # Assert
-    assert print_io.format_status.call_count == 1
-    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
-    assert print_io.print.call_count == 2
-    for i in range(print_io.print.call_count):
-        assert print_io.print.call_args_list[i].args == (expected_print[i], )
+    # Assert
+    assert print_io.format_status.call_count == 1
+    assert print_io.format_status.call_args_list[0].args == (fake_mission_status,)
+    assert print_io.print.call_count == 2
+    for i in range(print_io.print.call_count):
+        assert print_io.print.call_args_list[i].args == (expected_print[i],)
 
 
 # print_diagnosis tests
-def test_print_io_print_diagnosis_only_prints_separators_and_headers_when_status_list_and_activations_are_empty_tree_traversal_unused(mocker):
-    # Arrange
-    arg_diagnosis = MagicMock()
-
-    arg_diagnosis.configure_mock(**{'get_status_list.return_value': []})
-    arg_diagnosis.configure_mock(**{'current_activations.return_value': []})
-
-    mocker.patch(print_io.__name__ + '.print_separator')
-    mocker.patch(print_io.__name__ + '.print')
-
-    # Act
-    print_io.print_diagnosis(arg_diagnosis)
-
-    # Assert
-    assert print_io.print_separator.call_count == 2
-    assert print_io.print.call_count == 2
-    assert print_io.print.call_args_list[0].args == (print_io.bcolors.HEADER + print_io.bcolors.BOLD + "DIAGNOSIS INFO: \n" + print_io.bcolors.ENDC, )
-    assert print_io.print.call_args_list[1].args == (print_io.bcolors.HEADER + print_io.bcolors.BOLD + "\nCURRENT ACTIVATIONS: \n" + print_io.bcolors.ENDC, )
-
-def test_print_io_print_diagnosis_prints_separators_headers_status_and_activations_when_status_list_and_activations_have_items_tree_traversal_unused(mocker):
-    # Arrange
-    arg_diagnosis = MagicMock()
-
-    num_status = pytest.gen.randint(1, 10) # arbitrary from 1 to 10
-    fake_status = []
-    fake_format = 
MagicMock() - num_activations = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - fake_activations = [] - fake_str = MagicMock() - - for i in range(num_status): - fake_status.append([MagicMock(), MagicMock()]) - - for i in range(num_activations): - fake_activations.append(MagicMock()) - - arg_diagnosis.configure_mock(**{'get_status_list.return_value': fake_status}) - arg_diagnosis.current_activations = fake_activations - - mocker.patch(print_io.__name__ + '.print_separator') - mocker.patch(print_io.__name__ + '.print') - mocker.patch(print_io.__name__ + '.format_status', return_value=fake_format) - mocker.patch(print_io.__name__ + '.str', return_value=fake_str) - - # Act - print_io.print_diagnosis(arg_diagnosis) - - # Assert - assert print_io.print_separator.call_count == 2 - assert print_io.print.call_count == 2 + num_status + num_activations - assert print_io.print.call_args_list[0].args == (print_io.bcolors.HEADER + print_io.bcolors.BOLD + "DIAGNOSIS INFO: \n" + print_io.bcolors.ENDC, ) - for i in range(num_status): - assert print_io.print.call_args_list[1 + i].args == (fake_status[i][0] + ': ' + fake_format, ) - assert print_io.format_status.call_args_list[i].args == (fake_status[i][1], ) - assert print_io.print.call_args_list[1 + num_status].args == (print_io.bcolors.HEADER + print_io.bcolors.BOLD + "\nCURRENT ACTIVATIONS: \n" + print_io.bcolors.ENDC, ) - for i in range(num_activations): - assert print_io.print.call_args_list[2 + num_status + i].args == ('---' + fake_str, ) - assert print_io.str.call_args_list[i].args == (fake_activations[i], ) +def test_print_io_print_diagnosis_only_prints_separators_and_headers_when_status_list_and_activations_are_empty_tree_traversal_unused( + mocker, +): + # Arrange + arg_diagnosis = MagicMock() + + arg_diagnosis.configure_mock(**{"get_status_list.return_value": []}) + arg_diagnosis.configure_mock(**{"current_activations.return_value": []}) + + mocker.patch(print_io.__name__ + ".print_separator") + mocker.patch(print_io.__name__ + ".print") + + # Act + print_io.print_diagnosis(arg_diagnosis) + + # Assert + assert print_io.print_separator.call_count == 2 + assert print_io.print.call_count == 2 + assert print_io.print.call_args_list[0].args == ( + print_io.bcolors.HEADER + + print_io.bcolors.BOLD + + "DIAGNOSIS INFO: \n" + + print_io.bcolors.ENDC, + ) + assert print_io.print.call_args_list[1].args == ( + print_io.bcolors.HEADER + + print_io.bcolors.BOLD + + "\nCURRENT ACTIVATIONS: \n" + + print_io.bcolors.ENDC, + ) + + +def test_print_io_print_diagnosis_prints_separators_headers_status_and_activations_when_status_list_and_activations_have_items_tree_traversal_unused( + mocker, +): + # Arrange + arg_diagnosis = MagicMock() + + num_status = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + fake_status = [] + fake_format = MagicMock() + num_activations = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + fake_activations = [] + fake_str = MagicMock() + + for i in range(num_status): + fake_status.append([MagicMock(), MagicMock()]) + + for i in range(num_activations): + fake_activations.append(MagicMock()) + + arg_diagnosis.configure_mock(**{"get_status_list.return_value": fake_status}) + arg_diagnosis.current_activations = fake_activations + + mocker.patch(print_io.__name__ + ".print_separator") + mocker.patch(print_io.__name__ + ".print") + mocker.patch(print_io.__name__ + ".format_status", return_value=fake_format) + mocker.patch(print_io.__name__ + ".str", return_value=fake_str) + + # Act + print_io.print_diagnosis(arg_diagnosis) + + # Assert 
+ assert print_io.print_separator.call_count == 2 + assert print_io.print.call_count == 2 + num_status + num_activations + assert print_io.print.call_args_list[0].args == ( + print_io.bcolors.HEADER + + print_io.bcolors.BOLD + + "DIAGNOSIS INFO: \n" + + print_io.bcolors.ENDC, + ) + for i in range(num_status): + assert print_io.print.call_args_list[1 + i].args == ( + fake_status[i][0] + ": " + fake_format, + ) + assert print_io.format_status.call_args_list[i].args == (fake_status[i][1],) + assert print_io.print.call_args_list[1 + num_status].args == ( + print_io.bcolors.HEADER + + print_io.bcolors.BOLD + + "\nCURRENT ACTIVATIONS: \n" + + print_io.bcolors.ENDC, + ) + for i in range(num_activations): + assert print_io.print.call_args_list[2 + num_status + i].args == ( + "---" + fake_str, + ) + assert print_io.str.call_args_list[i].args == (fake_activations[i],) # subsystem_status_str tests -def test_print_io_subsystem_status_str_returns_expected_string_when_stat_exists_as_key_in_status_colors(mocker): - # Arrange - arg_ss = MagicMock() +def test_print_io_subsystem_status_str_returns_expected_string_when_stat_exists_as_key_in_status_colors( + mocker, +): + # Arrange + arg_ss = MagicMock() + + fake_type = MagicMock() + fake_stat = pytest.gen.choice(list(print_io.status_colors.keys())) + fake_uncertainty = MagicMock() + fake_str = MagicMock() + + expected_s = print_io.bcolors.BOLD + "[" + fake_str + "] : " + print_io.bcolors.ENDC + expected_s = ( + expected_s + + "\n" + + print_io.status_colors[fake_stat] + + " ---- " + + fake_str + + print_io.bcolors.ENDC + + " (" + + fake_str + + ")" + ) + expected_s = expected_s + "\n" + + arg_ss.type = fake_type + arg_ss.configure_mock(**{"get_status.return_value": fake_stat}) + arg_ss.uncertainty = fake_uncertainty + + mocker.patch(print_io.__name__ + ".str", return_value=fake_str) + + # Act + result = print_io.subsystem_status_str(arg_ss) + + # Assert + assert print_io.str.call_count == 3 + assert print_io.str.call_args_list[0].args == (fake_type,) + assert print_io.str.call_args_list[1].args == (fake_stat,) + assert print_io.str.call_args_list[2].args == (fake_uncertainty,) + assert result == expected_s - fake_type = MagicMock() - fake_stat = pytest.gen.choice(list(print_io.status_colors.keys())) - fake_uncertainty = MagicMock() - fake_str = MagicMock() - expected_s = print_io.bcolors.BOLD + '[' + fake_str + '] : ' + print_io.bcolors.ENDC - expected_s = expected_s + '\n' + print_io.status_colors[fake_stat] + ' ---- ' + fake_str + print_io.bcolors.ENDC + ' (' + fake_str + ')' - expected_s = expected_s + '\n' +# subsystem_str tests +def test_print_io_subsystem_str_returns_string_without_any_data_when_headers_tests_and_test_data_empty( + mocker, +): + # Arrange + arg_ss = MagicMock() - arg_ss.type = fake_type - arg_ss.configure_mock(**{'get_status.return_value':fake_stat}) - arg_ss.uncertainty = fake_uncertainty + arg_ss.type = str(MagicMock()) + arg_ss.headers = [] + arg_ss.tests = [] + arg_ss.test_data = [] - mocker.patch(print_io.__name__ + '.str', return_value=fake_str) + expected_result = print_io.bcolors.BOLD + arg_ss.type + "\n" + print_io.bcolors.ENDC + expected_result = expected_result + "--[headers] \n--[tests] \n--[test data] " - # Act - result = print_io.subsystem_status_str(arg_ss) + # Act + result = print_io.subsystem_str(arg_ss) - # Assert - assert print_io.str.call_count == 3 - assert print_io.str.call_args_list[0].args == (fake_type, ) - assert print_io.str.call_args_list[1].args == (fake_stat, ) - assert print_io.str.call_args_list[2].args 
== (fake_uncertainty, ) - assert result == expected_s + # Assert + assert result == expected_result -# subsystem_str tests -def test_print_io_subsystem_str_returns_string_without_any_data_when_headers_tests_and_test_data_empty(mocker): - # Arrange - arg_ss = MagicMock() - - arg_ss.type = str(MagicMock()) - arg_ss.headers = [] - arg_ss.tests = [] - arg_ss.test_data = [] - - expected_result = print_io.bcolors.BOLD + arg_ss.type + '\n' + print_io.bcolors.ENDC - expected_result = expected_result + '--[headers] \n--[tests] \n--[test data] ' - - # Act - result = print_io.subsystem_str(arg_ss) - - # Assert - assert result == expected_result - -def test_print_io_subsystem_str_returns_string_all_data_when_headers_tests_and_test_data_occupied(mocker): - # Arrange - arg_ss = MagicMock() - - arg_ss.type = str(MagicMock()) - num_headers = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - arg_ss.headers = [] - num_tests = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - arg_ss.tests = [] - num_test_data = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - arg_ss.test_data = [] - - expected_result = print_io.bcolors.BOLD + arg_ss.type + '\n' + print_io.bcolors.ENDC - expected_result = expected_result + '--[headers] ' - for i in range(num_headers): - arg_ss.headers.append(MagicMock()) - expected_result = expected_result + '\n---' + str(arg_ss.headers[i]) - expected_result = expected_result + '\n--[tests] ' - for i in range(num_tests): - arg_ss.tests.append(MagicMock()) - expected_result = expected_result + '\n---' + str(arg_ss.tests[i]) - expected_result = expected_result + '\n--[test data] ' - for i in range(num_test_data): - arg_ss.test_data.append(MagicMock()) - expected_result = expected_result + '\n---' + str(arg_ss.test_data[i]) - - # Act - result = print_io.subsystem_str(arg_ss) - - # Assert - assert result == expected_result +def test_print_io_subsystem_str_returns_string_all_data_when_headers_tests_and_test_data_occupied( + mocker, +): + # Arrange + arg_ss = MagicMock() + + arg_ss.type = str(MagicMock()) + num_headers = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + arg_ss.headers = [] + num_tests = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + arg_ss.tests = [] + num_test_data = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + arg_ss.test_data = [] + + expected_result = print_io.bcolors.BOLD + arg_ss.type + "\n" + print_io.bcolors.ENDC + expected_result = expected_result + "--[headers] " + for i in range(num_headers): + arg_ss.headers.append(MagicMock()) + expected_result = expected_result + "\n---" + str(arg_ss.headers[i]) + expected_result = expected_result + "\n--[tests] " + for i in range(num_tests): + arg_ss.tests.append(MagicMock()) + expected_result = expected_result + "\n---" + str(arg_ss.tests[i]) + expected_result = expected_result + "\n--[test data] " + for i in range(num_test_data): + arg_ss.test_data.append(MagicMock()) + expected_result = expected_result + "\n---" + str(arg_ss.test_data[i]) + + # Act + result = print_io.subsystem_str(arg_ss) + + # Assert + assert result == expected_result # headers_string tests def test_print_io_format_status_returns_empty_string_when_headers_is_vacant(): - # Arrange - arg_headers = [] + # Arrange + arg_headers = [] + + # Act + result = print_io.headers_string(arg_headers) - # Act - result = print_io.headers_string(arg_headers) + # Assert + assert result == str() - # Assert - assert result == str() def test_print_io_format_status_returns_all_headers_in_formatted_string_when_occupied(): - # Arrange - num_headers = 
pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - arg_headers = [] + # Arrange + num_headers = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + arg_headers = [] - expected_result = '' + expected_result = "" - for i in range(num_headers): - arg_headers.append(str(MagicMock())) - expected_result = expected_result + '\n -- ' + arg_headers[i] + for i in range(num_headers): + arg_headers.append(str(MagicMock())) + expected_result = expected_result + "\n -- " + arg_headers[i] - # Act - result = print_io.headers_string(arg_headers) + # Act + result = print_io.headers_string(arg_headers) - # Assert - assert result == expected_result + # Assert + assert result == expected_result # format_status tests + def test_print_io_format_status_raises_KeyError_when_stat_is_string_and_not_in_status_color_keys(): - # Arrange - arg_stat = str(MagicMock()) + # Arrange + arg_stat = str(MagicMock()) - # Act - with pytest.raises(KeyError) as e_info: - result = print_io.format_status(arg_stat) + # Act + with pytest.raises(KeyError) as e_info: + result = print_io.format_status(arg_stat) + + # Assert + assert str(e_info.value) == '"' + arg_stat + '"' - # Assert - assert str(e_info.value) == '"' + arg_stat + '"' def test_print_io_format_status_returns_stat_in_its_status_color_when_stat_is_string_and_a_key(): - # Arrange - arg_stat = pytest.gen.choice(list(print_io.status_colors.keys())) + # Arrange + arg_stat = pytest.gen.choice(list(print_io.status_colors.keys())) - expected_result = print_io.status_colors[arg_stat] + arg_stat + print_io.scolors['ENDC'] + expected_result = ( + print_io.status_colors[arg_stat] + arg_stat + print_io.scolors["ENDC"] + ) - # Act - result = print_io.format_status(arg_stat) + # Act + result = print_io.format_status(arg_stat) - # Assert - assert result == expected_result + # Assert + assert result == expected_result -def test_print_io_format_status_returns_only_a_right_parenthesis_in_string_when_stat_is_an_empty_list(): - # Arrange - arg_stat = [] - expected_result = ')' +def test_print_io_format_status_returns_only_a_right_parenthesis_in_string_when_stat_is_an_empty_list(): + # Arrange + arg_stat = [] - # Act - result = print_io.format_status(arg_stat) + expected_result = ")" - # Assert - assert result == expected_result + # Act + result = print_io.format_status(arg_stat) -def test_print_io_format_status_returns_all_status_in_stat_formatted_into_string_when_stat_is_a_list_of_status(mocker): - # Arrange - num_stat = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 - arg_stat = [] + # Assert + assert result == expected_result - expected_result = '(' - for i in range(num_stat): - arg_stat.append(pytest.gen.choice(list(print_io.status_colors.keys()))) - expected_result += print_io.status_colors[arg_stat[i]] + arg_stat[i] + print_io.scolors['ENDC'] - if i != (num_stat - 1): - expected_result += ', ' - expected_result += ')' - # Act - result = print_io.format_status(arg_stat) +def test_print_io_format_status_returns_all_status_in_stat_formatted_into_string_when_stat_is_a_list_of_status( + mocker, +): + # Arrange + num_stat = pytest.gen.randint(1, 10) # arbitrary from 1 to 10 + arg_stat = [] + + expected_result = "(" + for i in range(num_stat): + arg_stat.append(pytest.gen.choice(list(print_io.status_colors.keys()))) + expected_result += ( + print_io.status_colors[arg_stat[i]] + arg_stat[i] + print_io.scolors["ENDC"] + ) + if i != (num_stat - 1): + expected_result += ", " + expected_result += ")" + + # Act + result = print_io.format_status(arg_stat) - # Assert - assert result == 
expected_result \ No newline at end of file + # Assert + assert result == expected_result diff --git a/test/onair/src/util/test_sim_io.py b/test/onair/src/util/test_sim_io.py index 3aba2e95..6b1ebbe5 100644 --- a/test/onair/src/util/test_sim_io.py +++ b/test/onair/src/util/test_sim_io.py @@ -11,267 +11,376 @@ import onair.src.util.sim_io as sim_io -def test_sim_io_render_reasoning_writes_txt_and_csv_files_even_when_list_is_empty(mocker): - # Arrange - SAVE_PATH = 'ONAIR_DIAGNOSIS_SAVE_PATH' - diag1 = MagicMock() - arg_diagnosis_list = [] - fake_system_filename = MagicMock() - fake_full_path = MagicMock() - fake_file_iterator = MagicMock() - fake_file = MagicMock() - fake_file.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - - mocker.patch(sim_io.__name__ + '.os.environ.get', return_value=fake_system_filename) - mocker.patch(sim_io.__name__ + '.os.path.join', return_value=fake_full_path) - mocker.patch('builtins.open', return_value=fake_file) - - # Act - sim_io.render_reasoning(arg_diagnosis_list) - - # Assert - assert open.call_count == 2 - assert fake_file_iterator.write.call_count == 4 - assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[0].args == (fake_system_filename, 'diagnosis.txt') - assert open.call_args_list[0].args == (fake_full_path,) - assert open.call_args_list[0].kwargs == {'mode':'a'} - assert fake_file_iterator.write.call_args_list[0].args == ('==========================================================\n',) - assert fake_file_iterator.write.call_args_list[1].args == (' DIAGNOSIS \n',) - assert fake_file_iterator.write.call_args_list[2].args == ('==========================================================\n',) - assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[1].args == (fake_system_filename, 'diagnosis.csv') - assert open.call_args_list[1].args == (fake_full_path,) - assert open.call_args_list[1].kwargs == {'mode':'a'} - assert fake_file_iterator.write.call_args_list[3].args == ('time_step, cohens_kappa, faults, subgraph\n',) - -def test_sim_io_render_reasoning_writes_txt_and_csv_files_with_entry_for_each_given_diagnosis_in_list(mocker): - # Arrange - SAVE_PATH = 'ONAIR_DIAGNOSIS_SAVE_PATH' - diag1 = MagicMock() - arg_diagnosis_list = [] - fake_system_filename = MagicMock() - fake_full_path = MagicMock() - fake_file_iterator = MagicMock() - fake_file = MagicMock() - fake_file.configure_mock(**{'__enter__.return_value': fake_file_iterator}) - fake_timestep = "my fake time step" - fake_str = MagicMock() - fake_results_csv = MagicMock - - mocker.patch(sim_io.__name__ + '.os.environ.get', return_value=fake_system_filename) - mocker.patch(sim_io.__name__ + '.os.path.join', return_value=fake_full_path) - mocker.patch('builtins.open', return_value=fake_file) - - for i in range(5): - fake_diag = MagicMock() - fake_diag.configure_mock(**{'get_time_step.return_value':fake_timestep, - '__str__.return_value':fake_str, - 'results_csv.return_value':fake_results_csv}) - arg_diagnosis_list.append(fake_diag) - - # Act - sim_io.render_reasoning(arg_diagnosis_list) - - # Assert - assert open.call_count == 2 - assert fake_file_iterator.write.call_count == 4 + 5*5 - assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[0].args == (fake_system_filename, 'diagnosis.txt') - assert open.call_args_list[0].args == (fake_full_path,) - assert open.call_args_list[0].kwargs == {'mode':'a'} - assert 
fake_file_iterator.write.call_args_list[0].args == ('==========================================================\n',) - assert fake_file_iterator.write.call_args_list[1].args == (' DIAGNOSIS \n',) - assert fake_file_iterator.write.call_args_list[2].args == ('==========================================================\n',) - - for i in range(5): - assert fake_file_iterator.write.call_args_list[i*4 + 3].args == ('\n----------------------------------------------------------\n',) - assert fake_file_iterator.write.call_args_list[i*4 + 4].args == ('*** DIAGNOSIS AT FRAME ' + fake_timestep + ' ***\n',) - assert fake_file_iterator.write.call_args_list[i*4 + 5].args == (fake_str,) - assert fake_file_iterator.write.call_args_list[i*4 + 6].args == ('----------------------------------------------------------\n',) - - assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[1].args == (fake_system_filename, 'diagnosis.csv') - assert open.call_args_list[1].args == (fake_full_path,) - assert open.call_args_list[1].kwargs == {'mode':'a'} - assert fake_file_iterator.write.call_args_list[i*4 + 7].args == ('time_step, cohens_kappa, faults, subgraph\n',) - - for j in range(5): - assert fake_file_iterator.write.call_args_list[j + i*4 + 8].args == (fake_results_csv,) - -def test_sim_io_render_viz_does_only_stattest_render_viz_does_status_sensor_and_diagnosis_reports_when_diagnosis_is_givenus_and_sensor_reports_when_diagnosis_is_not_given(mocker): - # Arrange - SAVE_PATH = 'ONAIR_VIZ_SAVE_PATH' - arg_status_data = MagicMock() - arg_sensor_data = MagicMock() - arg_sim_name = MagicMock() - - fake_system_filename = MagicMock() - fake_full_path = MagicMock() - fake_iterator = MagicMock() - fake_file = MagicMock() - fake_file.configure_mock(**{'__enter__.return_value': fake_iterator}) - - expected_status_report = {} - expected_status_report['filename'] = arg_sim_name - expected_status_report['data'] = arg_status_data - expected_sensor_status_report = {} - expected_sensor_status_report['name'] = 'MISSION' - expected_sensor_status_report['children'] = arg_sensor_data - - mocker.patch(sim_io.__name__ + '.os.environ.get', return_value=fake_system_filename) - mocker.patch(sim_io.__name__ + '.os.path.join', return_value=fake_full_path) - mocker.patch('builtins.open', return_value=fake_file) - mocker.patch(sim_io.__name__ + '.json.dump') - - # Act - sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name) - - # Assert - assert open.call_count == 2 - assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[0].args == (fake_system_filename, 'system.json') - assert open.call_args_list[0].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[0].args == (expected_status_report, fake_iterator) - assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[1].args == (fake_system_filename, 'faults.json') - assert open.call_args_list[1].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[1].args == (expected_sensor_status_report, fake_iterator) - -def test_sim_io_render_viz_does_only_status_and_sensor_reports_when_diagnosis_is_given_as_None(mocker): - # Arrange - SAVE_PATH = 'ONAIR_VIZ_SAVE_PATH' - arg_status_data = MagicMock() - arg_sensor_data = MagicMock() - arg_sim_name = MagicMock() - arg_diagnosis = None - - fake_system_filename = MagicMock() - fake_full_path = MagicMock() - fake_iterator = MagicMock() - fake_file = 
MagicMock() - fake_file.configure_mock(**{'__enter__.return_value': fake_iterator}) - - expected_status_report = {} - expected_status_report['filename'] = arg_sim_name - expected_status_report['data'] = arg_status_data - expected_sensor_status_report = {} - expected_sensor_status_report['name'] = 'MISSION' - expected_sensor_status_report['children'] = arg_sensor_data - - mocker.patch(sim_io.__name__ + '.os.environ.get', return_value=fake_system_filename) - mocker.patch(sim_io.__name__ + '.os.path.join', return_value=fake_full_path) - mocker.patch('builtins.open', return_value=fake_file) - mocker.patch(sim_io.__name__ + '.json.dump') - - # Act - sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name, arg_diagnosis) - - # Assert - assert open.call_count == 2 - assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[0].args == (fake_system_filename, 'system.json') - assert open.call_args_list[0].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[0].args == (expected_status_report, fake_iterator) - assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[1].args == (fake_system_filename, 'faults.json') - assert open.call_args_list[1].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[1].args == (expected_sensor_status_report, fake_iterator) - -def test_sim_io_render_viz_does_status_sensor_and_diagnosis_reports_when_diagnosis_is_given(mocker): - # Arrange - SAVE_PATH = 'ONAIR_VIZ_SAVE_PATH' - arg_status_data = MagicMock() - arg_sensor_data = MagicMock() - arg_sim_name = MagicMock() - arg_diagnosis = MagicMock() - - fake_system_filename = MagicMock() - fake_full_path = MagicMock() - fake_iterator = MagicMock() - fake_file = MagicMock() - fake_file.configure_mock(**{'__enter__.return_value': fake_iterator}) - fake_results = MagicMock() - - expected_status_report = {} - expected_status_report['filename'] = arg_sim_name - expected_status_report['data'] = arg_status_data - expected_sensor_status_report = {} - expected_sensor_status_report['name'] = 'MISSION' - expected_sensor_status_report['children'] = arg_sensor_data - - mocker.patch(sim_io.__name__ + '.os.environ.get', return_value=fake_system_filename) - mocker.patch(sim_io.__name__ + '.os.path.join', return_value=fake_full_path) - mocker.patch('builtins.open', return_value=fake_file) - mocker.patch(sim_io.__name__ + '.json.dump') - arg_diagnosis.configure_mock(**{'get_diagnosis_viz_json.return_value': fake_results}) - - # Act - sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name, arg_diagnosis) - - # Assert - assert open.call_count == 3 - assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[0].args == (fake_system_filename, 'system.json') - assert open.call_args_list[0].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[0].args == (expected_status_report, fake_iterator) - assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[1].args == (fake_system_filename, 'faults.json') - assert open.call_args_list[1].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[1].args == (expected_sensor_status_report, fake_iterator) - arg_diagnosis.get_diagnosis_viz_json.assert_called_once() - assert sim_io.os.environ.get.call_args_list[2].args == (SAVE_PATH,) - assert sim_io.os.path.join.call_args_list[2].args == (fake_system_filename, 
'results.json') - assert open.call_args_list[2].args == (fake_full_path, 'w') - assert sim_io.json.dump.call_args_list[2].args == (fake_results, fake_iterator) - -def test_sim_io_print_dots_uses_mod_10_plus_one_dots_when_ts_mod_20_is_less_than_10(mocker): - # Arrange - arg_ts = 20 # really want 0-9 + 20 * (arbitrary random 0 to some number) - expected_num_dots = (arg_ts % 10) + 1 - dots_string = "" - - for i in range(expected_num_dots): - dots_string = dots_string + '.' - - mocker.patch("builtins.print") - - # Act - sim_io.print_dots(arg_ts) - - # Assert - print.assert_called_with('\033[95m' + dots_string + '\033[0m') - -def test_sim_io_print_dots_uses_10_minus_mod_10_plus_one_dots_when_ts_mod_20_is_10(mocker): - # Arrange - arg_ts = 10 # 10 is a static value by design but should still add 20 * 0 to some number - expected_num_dots = 10 - (arg_ts % 10) + 1 - dots_string = "" - - for i in range(expected_num_dots): - dots_string = dots_string + '.' - - mocker.patch("builtins.print") - - # Act - sim_io.print_dots(arg_ts) - - # Assert - print.assert_called_with('\033[95m' + dots_string + '\033[0m') - -def test_sim_io_print_dots_uses_10_minus_mod_10_plus_one_dots_when_ts_mod_20_is_greater_than_10(mocker): - # Arrange - arg_ts = 19 # really should be 11 to 19 + 20 * 0 to some random number - expected_num_dots = 10 - (arg_ts % 10) + 1 - dots_string = "" - - for i in range(expected_num_dots): - dots_string = dots_string + '.' - - mocker.patch("builtins.print") - - # Act - sim_io.print_dots(arg_ts) - - # Assert - print.assert_called_with('\033[95m' + dots_string + '\033[0m') \ No newline at end of file + +def test_sim_io_render_reasoning_writes_txt_and_csv_files_even_when_list_is_empty( + mocker, +): + # Arrange + SAVE_PATH = "ONAIR_DIAGNOSIS_SAVE_PATH" + diag1 = MagicMock() + arg_diagnosis_list = [] + fake_system_filename = MagicMock() + fake_full_path = MagicMock() + fake_file_iterator = MagicMock() + fake_file = MagicMock() + fake_file.configure_mock(**{"__enter__.return_value": fake_file_iterator}) + + mocker.patch(sim_io.__name__ + ".os.environ.get", return_value=fake_system_filename) + mocker.patch(sim_io.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch("builtins.open", return_value=fake_file) + + # Act + sim_io.render_reasoning(arg_diagnosis_list) + + # Assert + assert open.call_count == 2 + assert fake_file_iterator.write.call_count == 4 + assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[0].args == ( + fake_system_filename, + "diagnosis.txt", + ) + assert open.call_args_list[0].args == (fake_full_path,) + assert open.call_args_list[0].kwargs == {"mode": "a"} + assert fake_file_iterator.write.call_args_list[0].args == ( + "==========================================================\n", + ) + assert fake_file_iterator.write.call_args_list[1].args == ( + " DIAGNOSIS \n", + ) + assert fake_file_iterator.write.call_args_list[2].args == ( + "==========================================================\n", + ) + assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[1].args == ( + fake_system_filename, + "diagnosis.csv", + ) + assert open.call_args_list[1].args == (fake_full_path,) + assert open.call_args_list[1].kwargs == {"mode": "a"} + assert fake_file_iterator.write.call_args_list[3].args == ( + "time_step, cohens_kappa, faults, subgraph\n", + ) + + +def test_sim_io_render_reasoning_writes_txt_and_csv_files_with_entry_for_each_given_diagnosis_in_list( + 
mocker, +): + # Arrange + SAVE_PATH = "ONAIR_DIAGNOSIS_SAVE_PATH" + diag1 = MagicMock() + arg_diagnosis_list = [] + fake_system_filename = MagicMock() + fake_full_path = MagicMock() + fake_file_iterator = MagicMock() + fake_file = MagicMock() + fake_file.configure_mock(**{"__enter__.return_value": fake_file_iterator}) + fake_timestep = "my fake time step" + fake_str = MagicMock() + fake_results_csv = MagicMock() + + mocker.patch(sim_io.__name__ + ".os.environ.get", return_value=fake_system_filename) + mocker.patch(sim_io.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch("builtins.open", return_value=fake_file) + + for i in range(5): + fake_diag = MagicMock() + fake_diag.configure_mock( + **{ + "get_time_step.return_value": fake_timestep, + "__str__.return_value": fake_str, + "results_csv.return_value": fake_results_csv, + } + ) + arg_diagnosis_list.append(fake_diag) + + # Act + sim_io.render_reasoning(arg_diagnosis_list) + + # Assert + assert open.call_count == 2 + assert fake_file_iterator.write.call_count == 4 + 5 * 5 + assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[0].args == ( + fake_system_filename, + "diagnosis.txt", + ) + assert open.call_args_list[0].args == (fake_full_path,) + assert open.call_args_list[0].kwargs == {"mode": "a"} + assert fake_file_iterator.write.call_args_list[0].args == ( + "==========================================================\n", + ) + assert fake_file_iterator.write.call_args_list[1].args == ( + "                       DIAGNOSIS                        \n", + ) + assert fake_file_iterator.write.call_args_list[2].args == ( + "==========================================================\n", + ) + + for i in range(5): + assert fake_file_iterator.write.call_args_list[i * 4 + 3].args == ( + "\n----------------------------------------------------------\n", + ) + assert fake_file_iterator.write.call_args_list[i * 4 + 4].args == ( + "*** DIAGNOSIS AT FRAME " + + fake_timestep + + " ***\n", + ) + assert fake_file_iterator.write.call_args_list[i * 4 + 5].args == (fake_str,) + assert fake_file_iterator.write.call_args_list[i * 4 + 6].args == ( + "----------------------------------------------------------\n", + ) + + assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[1].args == ( + fake_system_filename, + "diagnosis.csv", + ) + assert open.call_args_list[1].args == (fake_full_path,) + assert open.call_args_list[1].kwargs == {"mode": "a"} + assert fake_file_iterator.write.call_args_list[i * 4 + 7].args == ( + "time_step, cohens_kappa, faults, subgraph\n", + ) + + for j in range(5): + assert fake_file_iterator.write.call_args_list[j + i * 4 + 8].args == ( + fake_results_csv, + ) + + +def test_sim_io_render_viz_does_only_status_and_sensor_reports_when_diagnosis_is_not_given( + mocker, +): + # Arrange + SAVE_PATH = "ONAIR_VIZ_SAVE_PATH" + arg_status_data = MagicMock() + arg_sensor_data = MagicMock() + arg_sim_name = MagicMock() + + fake_system_filename = MagicMock() + fake_full_path = MagicMock() + fake_iterator = MagicMock() + fake_file = MagicMock() + fake_file.configure_mock(**{"__enter__.return_value": fake_iterator}) + + expected_status_report = {} + expected_status_report["filename"] = arg_sim_name + expected_status_report["data"] = arg_status_data + expected_sensor_status_report = {} + expected_sensor_status_report["name"] = "MISSION" + expected_sensor_status_report["children"] =
arg_sensor_data + + mocker.patch(sim_io.__name__ + ".os.environ.get", return_value=fake_system_filename) + mocker.patch(sim_io.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch("builtins.open", return_value=fake_file) + mocker.patch(sim_io.__name__ + ".json.dump") + + # Act + sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name) + + # Assert + assert open.call_count == 2 + assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[0].args == ( + fake_system_filename, + "system.json", + ) + assert open.call_args_list[0].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[0].args == ( + expected_status_report, + fake_iterator, + ) + assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[1].args == ( + fake_system_filename, + "faults.json", + ) + assert open.call_args_list[1].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[1].args == ( + expected_sensor_status_report, + fake_iterator, + ) + + +def test_sim_io_render_viz_does_only_status_and_sensor_reports_when_diagnosis_is_given_as_None( + mocker, +): + # Arrange + SAVE_PATH = "ONAIR_VIZ_SAVE_PATH" + arg_status_data = MagicMock() + arg_sensor_data = MagicMock() + arg_sim_name = MagicMock() + arg_diagnosis = None + + fake_system_filename = MagicMock() + fake_full_path = MagicMock() + fake_iterator = MagicMock() + fake_file = MagicMock() + fake_file.configure_mock(**{"__enter__.return_value": fake_iterator}) + + expected_status_report = {} + expected_status_report["filename"] = arg_sim_name + expected_status_report["data"] = arg_status_data + expected_sensor_status_report = {} + expected_sensor_status_report["name"] = "MISSION" + expected_sensor_status_report["children"] = arg_sensor_data + + mocker.patch(sim_io.__name__ + ".os.environ.get", return_value=fake_system_filename) + mocker.patch(sim_io.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch("builtins.open", return_value=fake_file) + mocker.patch(sim_io.__name__ + ".json.dump") + + # Act + sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name, arg_diagnosis) + + # Assert + assert open.call_count == 2 + assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[0].args == ( + fake_system_filename, + "system.json", + ) + assert open.call_args_list[0].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[0].args == ( + expected_status_report, + fake_iterator, + ) + assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[1].args == ( + fake_system_filename, + "faults.json", + ) + assert open.call_args_list[1].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[1].args == ( + expected_sensor_status_report, + fake_iterator, + ) + + +def test_sim_io_render_viz_does_status_sensor_and_diagnosis_reports_when_diagnosis_is_given( + mocker, +): + # Arrange + SAVE_PATH = "ONAIR_VIZ_SAVE_PATH" + arg_status_data = MagicMock() + arg_sensor_data = MagicMock() + arg_sim_name = MagicMock() + arg_diagnosis = MagicMock() + + fake_system_filename = MagicMock() + fake_full_path = MagicMock() + fake_iterator = MagicMock() + fake_file = MagicMock() + fake_file.configure_mock(**{"__enter__.return_value": fake_iterator}) + fake_results = MagicMock() + + expected_status_report = {} + expected_status_report["filename"] = arg_sim_name + 
expected_status_report["data"] = arg_status_data + expected_sensor_status_report = {} + expected_sensor_status_report["name"] = "MISSION" + expected_sensor_status_report["children"] = arg_sensor_data + + mocker.patch(sim_io.__name__ + ".os.environ.get", return_value=fake_system_filename) + mocker.patch(sim_io.__name__ + ".os.path.join", return_value=fake_full_path) + mocker.patch("builtins.open", return_value=fake_file) + mocker.patch(sim_io.__name__ + ".json.dump") + arg_diagnosis.configure_mock( + **{"get_diagnosis_viz_json.return_value": fake_results} + ) + + # Act + sim_io.render_viz(arg_status_data, arg_sensor_data, arg_sim_name, arg_diagnosis) + + # Assert + assert open.call_count == 3 + assert sim_io.os.environ.get.call_args_list[0].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[0].args == ( + fake_system_filename, + "system.json", + ) + assert open.call_args_list[0].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[0].args == ( + expected_status_report, + fake_iterator, + ) + assert sim_io.os.environ.get.call_args_list[1].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[1].args == ( + fake_system_filename, + "faults.json", + ) + assert open.call_args_list[1].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[1].args == ( + expected_sensor_status_report, + fake_iterator, + ) + arg_diagnosis.get_diagnosis_viz_json.assert_called_once() + assert sim_io.os.environ.get.call_args_list[2].args == (SAVE_PATH,) + assert sim_io.os.path.join.call_args_list[2].args == ( + fake_system_filename, + "results.json", + ) + assert open.call_args_list[2].args == (fake_full_path, "w") + assert sim_io.json.dump.call_args_list[2].args == (fake_results, fake_iterator) + + +def test_sim_io_print_dots_uses_mod_10_plus_one_dots_when_ts_mod_20_is_less_than_10( + mocker, +): + # Arrange + arg_ts = 20 # really want 0-9 + 20 * (arbitrary random 0 to some number) + expected_num_dots = (arg_ts % 10) + 1 + dots_string = "" + + for i in range(expected_num_dots): + dots_string = dots_string + "." + + mocker.patch("builtins.print") + + # Act + sim_io.print_dots(arg_ts) + + # Assert + print.assert_called_with("\033[95m" + dots_string + "\033[0m") + + +def test_sim_io_print_dots_uses_10_minus_mod_10_plus_one_dots_when_ts_mod_20_is_10( + mocker, +): + # Arrange + arg_ts = ( + 10 # 10 is a static value by design but should still add 20 * 0 to some number + ) + expected_num_dots = 10 - (arg_ts % 10) + 1 + dots_string = "" + + for i in range(expected_num_dots): + dots_string = dots_string + "." + + mocker.patch("builtins.print") + + # Act + sim_io.print_dots(arg_ts) + + # Assert + print.assert_called_with("\033[95m" + dots_string + "\033[0m") + + +def test_sim_io_print_dots_uses_10_minus_mod_10_plus_one_dots_when_ts_mod_20_is_greater_than_10( + mocker, +): + # Arrange + arg_ts = 19 # really should be 11 to 19 + 20 * 0 to some random number + expected_num_dots = 10 - (arg_ts % 10) + 1 + dots_string = "" + + for i in range(expected_num_dots): + dots_string = dots_string + "." 
+ + mocker.patch("builtins.print") + + # Act + sim_io.print_dots(arg_ts) + + # Assert + print.assert_called_with("\033[95m" + dots_string + "\033[0m") diff --git a/test/plugins/generic/test_generic_plugin.py b/test/plugins/generic/test_generic_plugin.py index 2a4f7bff..a8894656 100644 --- a/test/plugins/generic/test_generic_plugin.py +++ b/test/plugins/generic/test_generic_plugin.py @@ -13,6 +13,7 @@ from plugins.generic.generic_plugin import Plugin + def test_update_does_nothing(): # Arrange cut = Plugin.__new__(Plugin) @@ -23,6 +24,7 @@ def test_update_does_nothing(): # Assert assert result == None + def test_render_reasoning_does_nothing(): # Arrange cut = Plugin.__new__(Plugin) diff --git a/test/plugins/kalman/test_kalman_plugin.py b/test/plugins/kalman/test_kalman_plugin.py index ec7248d6..e604ecc3 100644 --- a/test/plugins/kalman/test_kalman_plugin.py +++ b/test/plugins/kalman/test_kalman_plugin.py @@ -15,15 +15,21 @@ from plugins.kalman import kalman_plugin from plugins.kalman.kalman_plugin import Plugin as Kalman + # test init -def test_Kalman__init__initializes_variables_to_expected_values_when_given_all_args_except_window_size(mocker): +def test_Kalman__init__initializes_variables_to_expected_values_when_given_all_args_except_window_size( + mocker, +): # Arrange arg_name = MagicMock() arg_headers = [MagicMock(), MagicMock()] fake_var = MagicMock() - class Fake_KalmanFilter(): - def __init__(self, state_transition, process_noise, observation_model, observation_noise): + + class Fake_KalmanFilter: + def __init__( + self, state_transition, process_noise, observation_model, observation_noise + ): self.test_var = fake_var self.state_transition = state_transition self.process_noise = process_noise @@ -32,9 +38,13 @@ def __init__(self, state_transition, process_noise, observation_model, observati forced_diag_return_value = MagicMock() forced_array_return_value = MagicMock() - mocker.patch(kalman_plugin.__name__ + '.simdkalman.KalmanFilter', Fake_KalmanFilter) - mocker.patch(kalman_plugin.__name__ + '.np.diag', return_value=forced_diag_return_value) - mocker.patch(kalman_plugin.__name__ + '.np.array', return_value=forced_array_return_value) + mocker.patch(kalman_plugin.__name__ + ".simdkalman.KalmanFilter", Fake_KalmanFilter) + mocker.patch( + kalman_plugin.__name__ + ".np.diag", return_value=forced_diag_return_value + ) + mocker.patch( + kalman_plugin.__name__ + ".np.array", return_value=forced_array_return_value + ) cut = Kalman.__new__(Kalman) @@ -48,20 +58,26 @@ def __init__(self, state_transition, process_noise, observation_model, observati assert cut.window_size == 3 assert isinstance(cut.kf, Fake_KalmanFilter) assert cut.kf.test_var == fake_var - assert cut.kf.state_transition == [[1,1],[0,1]] + assert cut.kf.state_transition == [[1, 1], [0, 1]] assert cut.kf.process_noise == forced_diag_return_value assert cut.kf.observation_model == forced_array_return_value assert cut.kf.observation_noise == 1.0 -def test_Kalman__init__initializes_variables_to_expected_values_when_given_all_args(mocker): + +def test_Kalman__init__initializes_variables_to_expected_values_when_given_all_args( + mocker, +): # Arrange arg_name = MagicMock() arg_headers = [MagicMock(), MagicMock()] arg_window_size = MagicMock() fake_var = MagicMock() - class Fake_KalmanFilter(): - def __init__(self, state_transition, process_noise, observation_model, observation_noise): + + class Fake_KalmanFilter: + def __init__( + self, state_transition, process_noise, observation_model, observation_noise + ): self.test_var = 
fake_var self.state_transition = state_transition self.process_noise = process_noise @@ -70,9 +86,13 @@ def __init__(self, state_transition, process_noise, observation_model, observati forced_diag_return_value = MagicMock() forced_array_return_value = MagicMock() - mocker.patch(kalman_plugin.__name__ + '.simdkalman.KalmanFilter', Fake_KalmanFilter) - mocker.patch(kalman_plugin.__name__ + '.np.diag', return_value=forced_diag_return_value) - mocker.patch(kalman_plugin.__name__ + '.np.array', return_value=forced_array_return_value) + mocker.patch(kalman_plugin.__name__ + ".simdkalman.KalmanFilter", Fake_KalmanFilter) + mocker.patch( + kalman_plugin.__name__ + ".np.diag", return_value=forced_diag_return_value + ) + mocker.patch( + kalman_plugin.__name__ + ".np.array", return_value=forced_array_return_value + ) cut = Kalman.__new__(Kalman) @@ -86,11 +106,12 @@ def __init__(self, state_transition, process_noise, observation_model, observati assert cut.window_size == arg_window_size assert isinstance(cut.kf, Fake_KalmanFilter) assert cut.kf.test_var == fake_var - assert cut.kf.state_transition == [[1,1],[0,1]] + assert cut.kf.state_transition == [[1, 1], [0, 1]] assert cut.kf.process_noise == forced_diag_return_value assert cut.kf.observation_model == forced_array_return_value assert cut.kf.observation_noise == 1.0 + # test update def test_Kalman_update_does_not_mutate_frames_attribute_when_arg_frame_is_empty(): # Arrange @@ -106,10 +127,11 @@ def test_Kalman_update_does_not_mutate_frames_attribute_when_arg_frame_is_empty( # Assert assert cut.frames == fake_frames + def test_Kalman_update_mutates_frames_attribute_as_expected_when_frames_is_empty_and_arg_frame_is_not_empty(): # Arrange fake_frames = [] - len_arg_frame = pytest.gen.randint(1, 10) # arbitrary, random integer from 1 to 10 + len_arg_frame = pytest.gen.randint(1, 10) # arbitrary, random integer from 1 to 10 arg_frame = [MagicMock()] * len_arg_frame cut = Kalman.__new__(Kalman) @@ -125,13 +147,16 @@ def test_Kalman_update_mutates_frames_attribute_as_expected_when_frames_is_empty # Assert assert cut.frames == expected_result + def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and_arg_frame_are_not_empty_and_len_arg_frame_greater_than_len_frames(): # Arrange - len_fake_frames = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 + len_fake_frames = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 fake_frames = [[MagicMock()]] * len_fake_frames - fake_window_size = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_window_size = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_arg_frame = pytest.gen.randint(6, 10) # arbitrary int greater than max len of fake_frames, from 6 to 10 + len_arg_frame = pytest.gen.randint( + 6, 10 + ) # arbitrary int greater than max len of fake_frames, from 6 to 10 arg_frame = [MagicMock()] * len_arg_frame cut = Kalman.__new__(Kalman) @@ -155,13 +180,16 @@ def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and # Assert assert cut.frames == expected_result + def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and_arg_frame_are_not_empty_and_len_arg_frame_less_than_len_frames(): # Arrange - len_fake_frames = pytest.gen.randint(6, 10) # arbitrary int greater than max len of arg_frame, from 6 to 10 + len_fake_frames = pytest.gen.randint( + 6, 10 + ) # arbitrary int greater than max len of arg_frame, from 6 to 10 fake_frames = [[MagicMock()]] * len_fake_frames - fake_window_size = 
pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + fake_window_size = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 - len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 + len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 arg_frame = [MagicMock()] * len_arg_frame cut = Kalman.__new__(Kalman) @@ -183,8 +211,10 @@ def test_Kalman_update_mutates_frames_attribute_as_expected_when_both_frames_and def test_Kalman_update_pops_first_index_of_frames_data_points_when_window_size_is_exceeded(): # Arrange - len_fake_frames = pytest.gen.randint(6, 10) # arbitrary int greater than max len of arg_frame, from 6 to 10 - # choosing to keep len of fake_frames greater than arg_frame in order to guarantee 'else' statement is reached + len_fake_frames = pytest.gen.randint( + 6, 10 + ) # arbitrary int greater than max len of arg_frame, from 6 to 10 + # choosing to keep len of fake_frames greater than arg_frame in order to guarantee 'else' statement is reached # fake_frames = [[MagicMock()]] * len_fake_frames expected_result = [] fake_frames = [] @@ -192,9 +222,9 @@ def test_Kalman_update_pops_first_index_of_frames_data_points_when_window_size_i fake_frame = [MagicMock()] fake_frames.append([fake_frame]) expected_result.append([fake_frame]) - fake_window_size = 1 # arbitrary, chosen to guarantee 'popping' + fake_window_size = 1 # arbitrary, chosen to guarantee 'popping' - len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 + len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 arg_frame = [] for i in range(len_arg_frame): arg_frame.append(MagicMock()) @@ -213,10 +243,13 @@ def test_Kalman_update_pops_first_index_of_frames_data_points_when_window_size_i # Assert assert cut.frames == expected_result + def test_Kalman_update_will_not_pop_first_index_of_frames_data_points_when_window_size_is_never_exceeded(): # Arrange - len_fake_frames = pytest.gen.randint(6, 10) # arbitrary int greater than max len of arg_frame, from 6 to 10 - # choosing to keep len of fake_frames greater than arg_frame in order to guarantee 'else' statement is reached + len_fake_frames = pytest.gen.randint( + 6, 10 + ) # arbitrary int greater than max len of arg_frame, from 6 to 10 + # choosing to keep len of fake_frames greater than arg_frame in order to guarantee 'else' statement is reached # fake_frames = [[MagicMock()]] * len_fake_frames expected_result = [] fake_frames = [] @@ -224,9 +257,9 @@ def test_Kalman_update_will_not_pop_first_index_of_frames_data_points_when_windo fake_frame = [MagicMock()] fake_frames.append([fake_frame]) expected_result.append([fake_frame]) - fake_window_size = 99 # arbitrary, chosen to guarantee no 'popping' will occur + fake_window_size = 99 # arbitrary, chosen to guarantee no 'popping' will occur - len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 + len_arg_frame = pytest.gen.randint(1, 5) # arbitrary, random int from 1 to 5 arg_frame = [] for i in range(len_arg_frame): arg_frame.append(MagicMock()) @@ -244,8 +277,11 @@ def test_Kalman_update_will_not_pop_first_index_of_frames_data_points_when_windo # Assert assert cut.frames == expected_result + # test render diagnosis -def test_Kalman_render_reasoning_returns_value_returned_by_frame_diagnosis_function(mocker): +def test_Kalman_render_reasoning_returns_value_returned_by_frame_diagnosis_function( + mocker, +): # Arrange fake_frames = MagicMock() fake_headers = MagicMock() @@ -255,7 +291,9 @@ def 
test_Kalman_render_reasoning_returns_value_returned_by_frame_diagnosis_funct cut.frames = fake_frames cut.headers = fake_headers - mocker.patch.object(cut, 'frame_diagnosis', return_value=forced_frame_diagnose_return) + mocker.patch.object( + cut, "frame_diagnosis", return_value=forced_frame_diagnose_return + ) # Act result = cut.render_reasoning() @@ -263,15 +301,20 @@ def test_Kalman_render_reasoning_returns_value_returned_by_frame_diagnosis_funct # Assert assert result == forced_frame_diagnose_return + # test mean def test_Kalman_mean_calculates_return_value_by_dividing_sum_by_len(mocker): # Arrange arg_values = MagicMock() - forced_sum_return_value = pytest.gen.uniform(1.0, 10.0) # arbitrary, random float from 1.0 to 10.0 - forced_len_return_value = pytest.gen.uniform(1.0, 10.0) # arbitrary, random float from 1.0 to 10.0 - mocker.patch(kalman_plugin.__name__ + '.sum', return_value=forced_sum_return_value) - mocker.patch(kalman_plugin.__name__ + '.len', return_value=forced_len_return_value) + forced_sum_return_value = pytest.gen.uniform( + 1.0, 10.0 + ) # arbitrary, random float from 1.0 to 10.0 + forced_len_return_value = pytest.gen.uniform( + 1.0, 10.0 + ) # arbitrary, random float from 1.0 to 10.0 + mocker.patch(kalman_plugin.__name__ + ".sum", return_value=forced_sum_return_value) + mocker.patch(kalman_plugin.__name__ + ".len", return_value=forced_len_return_value) cut = Kalman.__new__(Kalman) @@ -280,19 +323,26 @@ def test_Kalman_mean_calculates_return_value_by_dividing_sum_by_len(mocker): # Assert assert kalman_plugin.sum.call_count == 1 - assert kalman_plugin.sum.call_args_list[0].args == (arg_values, ) + assert kalman_plugin.sum.call_args_list[0].args == (arg_values,) assert kalman_plugin.len.call_count == 1 - assert kalman_plugin.len.call_args_list[0].args == (arg_values, ) - assert result == forced_sum_return_value/forced_len_return_value + assert kalman_plugin.len.call_args_list[0].args == (arg_values,) + assert result == forced_sum_return_value / forced_len_return_value + # test residual -def test_Kalman_residual_calculates_return_value_by_finding_the_abs_difference_of_given_args(mocker): +def test_Kalman_residual_calculates_return_value_by_finding_the_abs_difference_of_given_args( + mocker, +): # Arrange - arg_predicted = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 - arg_actual = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + arg_predicted = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 + arg_actual = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 forced_abs_return_value = MagicMock() - mocker.patch(kalman_plugin.__name__ + '.abs', return_value=forced_abs_return_value) + mocker.patch(kalman_plugin.__name__ + ".abs", return_value=forced_abs_return_value) cut = Kalman.__new__(Kalman) @@ -302,15 +352,20 @@ def test_Kalman_residual_calculates_return_value_by_finding_the_abs_difference_o # Assert assert result == forced_abs_return_value assert kalman_plugin.abs.call_count == 1 - assert kalman_plugin.abs.call_args_list[0].args == (arg_actual - arg_predicted, ) + assert kalman_plugin.abs.call_args_list[0].args == (arg_actual - arg_predicted,) + # test std_dev -def test_Kalman_std_dev_calculates_return_value_by_using_np_std_function_on_arg_data(mocker): +def test_Kalman_std_dev_calculates_return_value_by_using_np_std_function_on_arg_data( + mocker, +): # Arrange arg_data = MagicMock() forced_std_return_value = MagicMock() - 
mocker.patch(kalman_plugin.__name__ + '.np.std', return_value=forced_std_return_value) + mocker.patch( + kalman_plugin.__name__ + ".np.std", return_value=forced_std_return_value + ) cut = Kalman.__new__(Kalman) @@ -320,10 +375,13 @@ def test_Kalman_std_dev_calculates_return_value_by_using_np_std_function_on_arg_ # Assert assert result == forced_std_return_value assert kalman_plugin.np.std.call_count == 1 - assert kalman_plugin.np.std.call_args_list[0].args == (arg_data, ) + assert kalman_plugin.np.std.call_args_list[0].args == (arg_data,) + # test predict -def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_data_is_empty_and_initial_val_equals_None(mocker): +def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_data_is_empty_and_initial_val_equals_None( + mocker, +): # Arrange arg_data = [] arg_forward_steps = MagicMock() @@ -332,8 +390,8 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -344,21 +402,26 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun # Assert assert result == forced_predict_return_value assert fake_kf.smooth.call_count == 1 - assert fake_kf.smooth.call_args_list[0].args == (arg_data, ) + assert fake_kf.smooth.call_args_list[0].args == (arg_data,) assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps) -def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_data_is_empty_and_initial_val_is_not_None(mocker): + +def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_data_is_empty_and_initial_val_is_not_None( + mocker, +): # Arrange arg_data = [] arg_forward_steps = MagicMock() - arg_initial_val = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + arg_initial_val = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -369,17 +432,24 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun # Assert assert result == forced_predict_return_value assert fake_kf.smooth.call_count == 1 - assert fake_kf.smooth.call_args_list[0].args == (arg_data, ) - assert fake_kf.smooth.call_args_list[0].kwargs == {'initial_value' : [arg_initial_val,0]} + assert fake_kf.smooth.call_args_list[0].args == (arg_data,) + assert fake_kf.smooth.call_args_list[0].kwargs == { + "initial_value": [arg_initial_val, 0] + } assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps) -def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_initial_val_equals_None(mocker): + +def 
test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_initial_val_equals_None( + mocker, +): # Arrange - len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_data = [] for i in range(len_arg_data): - rand_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + rand_float = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 arg_data.append(rand_float) arg_forward_steps = MagicMock() arg_initial_val = None @@ -387,8 +457,8 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -399,24 +469,29 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun # Assert assert result == forced_predict_return_value assert fake_kf.smooth.call_count == 1 - assert fake_kf.smooth.call_args_list[0].args == (arg_data, ) + assert fake_kf.smooth.call_args_list[0].args == (arg_data,) assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps) -def test_Kalman_predict_when_not_given_initial_val_arg_sets_initial_val_arg_equal_to_None(mocker): + +def test_Kalman_predict_when_not_given_initial_val_arg_sets_initial_val_arg_equal_to_None( + mocker, +): # Arrange - len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_data = [] for i in range(len_arg_data): - rand_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + rand_float = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 arg_data.append(rand_float) arg_forward_steps = MagicMock() fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -427,25 +502,32 @@ def test_Kalman_predict_when_not_given_initial_val_arg_sets_initial_val_arg_equa # Assert assert result == forced_predict_return_value assert fake_kf.smooth.call_count == 1 - assert fake_kf.smooth.call_args_list[0].args == (arg_data, ) + assert fake_kf.smooth.call_args_list[0].args == (arg_data,) assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps) -def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_initial_val_is_not_None(mocker): + +def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_initial_val_is_not_None( + mocker, +): # Arrange - len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_data = [] for i in range(len_arg_data): - rand_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, 
random float from -10.0 to 10.0 + rand_float = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 arg_data.append(rand_float) arg_forward_steps = MagicMock() - arg_initial_val = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + arg_initial_val = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -456,29 +538,38 @@ def test_Kalman_predict_smoothes_data_and_predicts_result_using_KalmanFilter_fun # Assert assert result == forced_predict_return_value assert fake_kf.smooth.call_count == 1 - assert fake_kf.smooth.call_args_list[0].args == (arg_data, ) - assert fake_kf.smooth.call_args_list[0].kwargs == {'initial_value' : [arg_initial_val,0]} + assert fake_kf.smooth.call_args_list[0].args == (arg_data,) + assert fake_kf.smooth.call_args_list[0].kwargs == { + "initial_value": [arg_initial_val, 0] + } assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data, arg_forward_steps) -def test_Kalman_predict_floatifies_args_and_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_args_are_not_float_values(mocker): + +def test_Kalman_predict_floatifies_args_and_smoothes_data_and_predicts_result_using_KalmanFilter_functions_as_expected_when_args_are_not_float_values( + mocker, +): # Arrange - len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 + len_arg_data = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10 arg_data = [] arg_data_float = [] for i in range(len_arg_data): - rand_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + rand_float = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 arg_data_float.append(rand_float) arg_data.append(str(rand_float)) arg_forward_steps = MagicMock() - arg_initial_val_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0 + arg_initial_val_float = pytest.gen.uniform( + -10.0, 10.0 + ) # arbitrary, random float from -10.0 to 10.0 arg_initial_val = str(arg_initial_val_float) fake_kf = MagicMock() forced_predict_return_value = MagicMock() - mocker.patch.object(fake_kf, 'smooth') - mocker.patch.object(fake_kf, 'predict', return_value=forced_predict_return_value) + mocker.patch.object(fake_kf, "smooth") + mocker.patch.object(fake_kf, "predict", return_value=forced_predict_return_value) cut = Kalman.__new__(Kalman) cut.kf = fake_kf @@ -492,33 +583,39 @@ def test_Kalman_predict_floatifies_args_and_smoothes_data_and_predicts_result_us assert arg_initial_val != arg_initial_val_float assert fake_kf.smooth.call_count == 1 assert fake_kf.smooth.call_args_list[0].args == (arg_data_float,) - assert fake_kf.smooth.call_args_list[0].kwargs == {'initial_value' : [arg_initial_val_float,0]} + assert fake_kf.smooth.call_args_list[0].kwargs == { + "initial_value": [arg_initial_val_float, 0] + } assert fake_kf.predict.call_count == 1 assert fake_kf.predict.call_args_list[0].args == (arg_data_float, arg_forward_steps) + # test predictions_for_given_data def test_Kalman_predictions_for_given_data_raises_error_when_data_arg_is_empty(mocker): # 
     # Arrange
     arg_data = []

     cut = Kalman.__new__(Kalman)

-    mocker.patch.object(cut, 'predict')
+    mocker.patch.object(cut, "predict")

     # Act
     with pytest.raises(IndexError) as e_info:
         cut.predictions_for_given_data(arg_data)

     # Assert
-    assert e_info.match('list index out of range')
+    assert e_info.match("list index out of range")
     assert cut.predict.call_count == 0

-def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg_has_only_one_element(mocker):
+
+def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg_has_only_one_element(
+    mocker,
+):
     # Arrange
     arg_data = [MagicMock()]

     cut = Kalman.__new__(Kalman)

-    mocker.patch.object(cut, 'predict')
+    mocker.patch.object(cut, "predict")

     expected_result = [0]
@@ -529,18 +626,23 @@ def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg
     assert result == expected_result
     assert cut.predict.call_count == 0

-def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg_has_more_than_one_element(mocker):
+
+def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg_has_more_than_one_element(
+    mocker,
+):
     # Arrange
-    len_data = pytest.gen.randint(2, 10) # arbitrary, random int from 1 to 10
+    len_data = pytest.gen.randint(2, 10)  # arbitrary, random int from 2 to 10
     arg_data = [MagicMock()] * len_data

     cut = Kalman.__new__(Kalman)

     forced_predict_return_value = MagicMock()
     forced_pred_mean = MagicMock()
-    mocker.patch.object(forced_predict_return_value, 'observations', forced_predict_return_value)
-    mocker.patch.object(forced_predict_return_value, 'mean', forced_pred_mean)
-    mocker.patch.object(cut, 'predict', return_value=forced_predict_return_value)
+    mocker.patch.object(
+        forced_predict_return_value, "observations", forced_predict_return_value
+    )
+    mocker.patch.object(forced_predict_return_value, "mean", forced_pred_mean)
+    mocker.patch.object(cut, "predict", return_value=forced_predict_return_value)

     expected_result = []
     for i in range(len_data - 1):
@@ -553,31 +655,37 @@ def test_Kalman_predictions_for_given_data_returns_expected_result_when_data_arg
     assert result == expected_result
     assert cut.predict.call_count == len_data - 1
     for i in range(len_data - 1):
-        cut.predict.call_args_list[i].args == (arg_data[0:i+1], 1, arg_data[0])
+        assert cut.predict.call_args_list[i].args == (
+            arg_data[0 : i + 1],
+            1,
+            arg_data[0],
+        )

+
 # test generate_residuals_for_given_data
-def test_Kalman_generate_residuals_for_given_data_raises_error_when_data_arg_is_empty(mocker):
+def test_Kalman_generate_residuals_for_given_data_raises_error_when_data_arg_is_empty(
+    mocker,
+):
     # Arrange
     arg_data = []

     cut = Kalman.__new__(Kalman)

-    mocker.patch.object(cut, 'predict')
+    mocker.patch.object(cut, "predict")

     # Act
     with pytest.raises(IndexError) as e_info:
         cut.generate_residuals_for_given_data(arg_data)

     # Assert
-    assert e_info.match('list index out of range')
+    assert e_info.match("list index out of range")
     assert cut.predict.call_count == 0

-def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_data_arg_has_only_one_element(mocker):
+
+def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_data_arg_has_only_one_element(
+    mocker,
+):
     # Arrange
     arg_data = [MagicMock()]

     cut = Kalman.__new__(Kalman)

-    mocker.patch.object(cut, 'predict')
+    mocker.patch.object(cut, "predict")

     expected_result = [0]
@@ -588,9 +696,12 @@ def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_d
     assert result == expected_result
     assert cut.predict.call_count == 0

-def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_data_arg_has_more_than_one_element(mocker):
+
+def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_data_arg_has_more_than_one_element(
+    mocker,
+):
     # Arrange
-    len_data = pytest.gen.randint(2, 10) # arbitrary, random int from 1 to 10
+    len_data = pytest.gen.randint(2, 10)  # arbitrary, random int from 2 to 10
     arg_data = [MagicMock()] * len_data

     cut = Kalman.__new__(Kalman)
@@ -599,12 +710,16 @@ def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_d
     forced_pred_mean = MagicMock()
     forced_residual_side_effect = []
     for i in range(len_data - 1):
-        rand_float = pytest.gen.uniform(-10.0, 10.0) # arbitrary, random float from -10.0 to 10.0
+        rand_float = pytest.gen.uniform(
+            -10.0, 10.0
+        )  # arbitrary, random float from -10.0 to 10.0
         forced_residual_side_effect.append(rand_float)
-    mocker.patch.object(forced_predict_return_value, 'observations', forced_predict_return_value)
-    mocker.patch.object(forced_predict_return_value, 'mean', forced_pred_mean)
-    mocker.patch.object(cut, 'residual', side_effect=forced_residual_side_effect)
-    mocker.patch.object(cut, 'predict', return_value=forced_predict_return_value)
+    mocker.patch.object(
+        forced_predict_return_value, "observations", forced_predict_return_value
+    )
+    mocker.patch.object(forced_predict_return_value, "mean", forced_pred_mean)
+    mocker.patch.object(cut, "residual", side_effect=forced_residual_side_effect)
+    mocker.patch.object(cut, "predict", return_value=forced_predict_return_value)

     expected_result = []
     for i in range(len_data - 1):
@@ -617,24 +732,33 @@ def test_Kalman_generate_residuals_for_given_data_returns_expected_result_when_d
     assert result == expected_result
     assert cut.predict.call_count == len_data - 1
     for i in range(len_data - 1):
-        cut.predict.call_args_list[i].args == (arg_data[0:i+1], 1, arg_data[0])
+        assert cut.predict.call_args_list[i].args == (
+            arg_data[0 : i + 1],
+            1,
+            arg_data[0],
+        )
     assert cut.residual.call_count == len_data - 1
     for i in range(len_data - 1):
-        cut.residual.call_args_list[i].args == (forced_pred_mean, arg_data[i + 1])
+        assert cut.residual.call_args_list[i].args == (
+            forced_pred_mean,
+            arg_data[i + 1],
+        )

+
 # test current_attribute_chunk_get_error
-def test_Kalman_current_attribute_chunk_get_error_returns_true_when_abs_of_mean_residuals_equal_to_or_greater_than_one_point_five(mocker):
+def test_Kalman_current_attribute_chunk_get_error_returns_true_when_abs_of_mean_residuals_equal_to_or_greater_than_one_point_five(
+    mocker,
+):
     # Arrange
     arg_data = MagicMock()

     cut = Kalman.__new__(Kalman)

     forced_generate_residuals_return_value = MagicMock()
     forced_mean_return_value = MagicMock()
-    forced_abs_return_value = pytest.gen.uniform(1.5, 10.0) # random float, greater than cutoff value 1.5
-
-    mocker.patch.object(cut, 'generate_residuals_for_given_data', return_value=forced_generate_residuals_return_value)
-    mocker.patch.object(cut, 'mean', return_value=forced_mean_return_value)
-    mocker.patch(kalman_plugin.__name__ + '.abs', return_value=forced_abs_return_value)
+    forced_abs_return_value = pytest.gen.uniform(
+        1.5, 10.0
+    )  # random float, at or above the cutoff value 1.5
+
+    mocker.patch.object(
+        cut,
+        "generate_residuals_for_given_data",
+        return_value=forced_generate_residuals_return_value,
+    )
+    mocker.patch.object(cut, "mean", return_value=forced_mean_return_value)
+    mocker.patch(kalman_plugin.__name__ + ".abs", return_value=forced_abs_return_value)

     # Act
     result = cut.current_attribute_chunk_get_error(arg_data)
@@ -642,25 +766,34 @@ def test_Kalman_current_attribute_chunk_get_error_returns_true_when_abs_of_mean_
     # Assert
     assert result == True
     assert cut.generate_residuals_for_given_data.call_count == 1
-    assert cut.generate_residuals_for_given_data.call_args_list[0].args == (arg_data, )
+    assert cut.generate_residuals_for_given_data.call_args_list[0].args == (arg_data,)
     assert cut.mean.call_count == 1
-    assert cut.mean.call_args_list[0].args == (forced_generate_residuals_return_value, )
+    assert cut.mean.call_args_list[0].args == (forced_generate_residuals_return_value,)
     assert kalman_plugin.abs.call_count == 2
-    assert kalman_plugin.abs.call_args_list[0].args == (forced_mean_return_value, )
-    assert kalman_plugin.abs.call_args_list[1].args == (forced_abs_return_value, )
+    assert kalman_plugin.abs.call_args_list[0].args == (forced_mean_return_value,)
+    assert kalman_plugin.abs.call_args_list[1].args == (forced_abs_return_value,)

+
-def test_Kalman_current_attribute_chunk_get_error_returns_false_when_abs_of_mean_residuals_less_than_one_point_five(mocker):
+def test_Kalman_current_attribute_chunk_get_error_returns_false_when_abs_of_mean_residuals_less_than_one_point_five(
+    mocker,
+):
     # Arrange
     arg_data = MagicMock()

     cut = Kalman.__new__(Kalman)

     forced_generate_residuals_return_value = MagicMock()
     forced_mean_return_value = MagicMock()
-    forced_abs_return_value = pytest.gen.uniform(0.0, 1.49) # random float, less than cutoff value 1.5
-
-    mocker.patch.object(cut, 'generate_residuals_for_given_data', return_value=forced_generate_residuals_return_value)
-    mocker.patch.object(cut, 'mean', return_value=forced_mean_return_value)
-    mocker.patch(kalman_plugin.__name__ + '.abs', return_value=forced_abs_return_value)
+    forced_abs_return_value = pytest.gen.uniform(
+        0.0, 1.49
+    )  # random float, less than cutoff value 1.5
+
+    mocker.patch.object(
+        cut,
+        "generate_residuals_for_given_data",
+        return_value=forced_generate_residuals_return_value,
+    )
+    mocker.patch.object(cut, "mean", return_value=forced_mean_return_value)
+    mocker.patch(kalman_plugin.__name__ + ".abs", return_value=forced_abs_return_value)

     # Act
     result = cut.current_attribute_chunk_get_error(arg_data)
@@ -668,12 +801,13 @@ def test_Kalman_current_attribute_chunk_get_error_returns_false_when_abs_of_mean
     # Assert
     assert result == False
     assert cut.generate_residuals_for_given_data.call_count == 1
-    assert cut.generate_residuals_for_given_data.call_args_list[0].args == (arg_data, )
+    assert cut.generate_residuals_for_given_data.call_args_list[0].args == (arg_data,)
     assert cut.mean.call_count == 1
-    assert cut.mean.call_args_list[0].args == (forced_generate_residuals_return_value, )
+    assert cut.mean.call_args_list[0].args == (forced_generate_residuals_return_value,)
     assert kalman_plugin.abs.call_count == 2
-    assert kalman_plugin.abs.call_args_list[0].args == (forced_mean_return_value, )
-    assert kalman_plugin.abs.call_args_list[1].args == (forced_abs_return_value, )
+    assert kalman_plugin.abs.call_args_list[0].args == (forced_mean_return_value,)
+    assert kalman_plugin.abs.call_args_list[1].args == (forced_abs_return_value,)
+

 # test frame_diagnosis
 def test_Kalman_frame_diagnosis_returns_empty_list_when_args_frame_and_headers_are_empty():
@@ -689,9 +823,12 @@ def test_Kalman_frame_diagnosis_returns_empty_list_when_args_frame_and_headers_a
     # Assert
     assert result == []

-def test_Kalman_frame_diagnosis_returns_empty_list_when_current_attribute_chunk_get_error_always_returns_false_and_args_not_empty(mocker):
+
+def test_Kalman_frame_diagnosis_returns_empty_list_when_current_attribute_chunk_get_error_always_returns_false_and_args_not_empty(
+    mocker,
+):
     # Arrange
-    len_args = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
+    len_args = pytest.gen.randint(1, 10)  # arbitrary, random int from 1 to 10
     arg_frame = []
     for i in range(len_args):
         arg_frame.append(MagicMock())
@@ -699,7 +836,11 @@ def test_Kalman_frame_diagnosis_returns_empty_list_when_current_attribute_chunk_
     cut = Kalman.__new__(Kalman)

     forced_get_error_return_value = False
-    mocker.patch.object(cut, 'current_attribute_chunk_get_error', return_value=forced_get_error_return_value)
+    mocker.patch.object(
+        cut,
+        "current_attribute_chunk_get_error",
+        return_value=forced_get_error_return_value,
+    )

     # Act
     result = cut.frame_diagnosis(arg_frame, arg_headers)
@@ -708,17 +849,26 @@ def test_Kalman_frame_diagnosis_returns_empty_list_when_current_attribute_chunk_
     assert result == []
     assert cut.current_attribute_chunk_get_error.call_count == len_args
     for i in range(len_args):
-        assert cut.current_attribute_chunk_get_error.call_args_list[i].args == (arg_frame[i], )
+        assert cut.current_attribute_chunk_get_error.call_args_list[i].args == (
+            arg_frame[i],
+        )
+

-def test_Kalman_frame_diagnosis_returns_empty_list_when_all_elements_in_headers_arg_match_time_str(mocker):
+def test_Kalman_frame_diagnosis_returns_empty_list_when_all_elements_in_headers_arg_match_time_str(
+    mocker,
+):
     # Arrange
-    len_args = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
+    len_args = pytest.gen.randint(1, 10)  # arbitrary, random int from 1 to 10
     arg_frame = [MagicMock()] * len_args
-    arg_headers = ['TIME'] * len_args
+    arg_headers = ["TIME"] * len_args

     cut = Kalman.__new__(Kalman)

     forced_get_error_return_value = True
-    mocker.patch.object(cut, 'current_attribute_chunk_get_error', return_value=forced_get_error_return_value)
+    mocker.patch.object(
+        cut,
+        "current_attribute_chunk_get_error",
+        return_value=forced_get_error_return_value,
+    )

     # Act
     result = cut.frame_diagnosis(arg_frame, arg_headers)
@@ -726,9 +876,12 @@ def test_Kalman_frame_diagnosis_returns_empty_list_when_all_elements_in_headers_
     # Assert
     assert result == []

-def test_Kalman_frame_diagnosis_returns_list_of_all_elements_in_headers_arg_when_current_attribute_chunk_get_error_always_returns_true_and_args_not_empty_and_headers_does_not_contain_strings_matching_time_str(mocker):
+
+def test_Kalman_frame_diagnosis_returns_list_of_all_elements_in_headers_arg_when_current_attribute_chunk_get_error_always_returns_true_and_args_not_empty_and_headers_does_not_contain_strings_matching_time_str(
+    mocker,
+):
     # Arrange
-    len_args = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
+    len_args = pytest.gen.randint(1, 10)  # arbitrary, random int from 1 to 10
     arg_frame = []
     arg_headers = []
     for i in range(len_args):
@@ -737,7 +890,11 @@ def test_Kalman_frame_diagnosis_returns_list_of_all_elements_in_headers_arg_when
     cut = Kalman.__new__(Kalman)

     forced_get_error_return_value = True
-    mocker.patch.object(cut, 'current_attribute_chunk_get_error', return_value=forced_get_error_return_value)
+    mocker.patch.object(
+        cut,
+        "current_attribute_chunk_get_error",
+        return_value=forced_get_error_return_value,
+    )

     # Act
     result = cut.frame_diagnosis(arg_frame, arg_headers)
@@ -745,33 +902,40 @@ def test_Kalman_frame_diagnosis_returns_list_of_all_elements_in_headers_arg_when
     # Assert
     assert result == arg_headers

-def test_Kalman_frame_diagnosis_returns_expected_sublist_of_headers_when_headers_contains_strings_matching_time_str_and_the_result_of_current_attribute_chunk_get_error_is_not_constant(mocker):
+
+def test_Kalman_frame_diagnosis_returns_expected_sublist_of_headers_when_headers_contains_strings_matching_time_str_and_the_result_of_current_attribute_chunk_get_error_is_not_constant(
+    mocker,
+):
     # Arrange
-    len_args = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
+    len_args = pytest.gen.randint(1, 10)  # arbitrary, random int from 1 to 10
     arg_frame = []
     arg_headers = []
     for i in range(len_args):
         arg_frame.append(MagicMock())
         arg_headers.append(str(MagicMock()))

-    num_time_strings = pytest.gen.randint(1, 10) # arbitrary, random int from 1 to 10
+    num_time_strings = pytest.gen.randint(1, 10)  # arbitrary, random int from 1 to 10
     for i in range(num_time_strings):
-        rand_index = pytest.gen.randint(0, len_args) # random index in arg_frame
-        arg_frame.append(MagicMock()) # ordering of frame does not matter
-        arg_headers.insert(rand_index, 'time')
+        rand_index = pytest.gen.randint(0, len_args)  # random insertion index in arg_headers
+        arg_frame.append(MagicMock())  # ordering of frame does not matter
+        arg_headers.insert(rand_index, "time")
         len_args += 1

     cut = Kalman.__new__(Kalman)

     expected_result = []
     forced_get_error_side_effect = []
     for i in range(len_args):
-        coin_flip = pytest.gen.randint(0, 1) # random int, either 0 or 1
-        if coin_flip == 0 or arg_headers[i] == 'time':
+        coin_flip = pytest.gen.randint(0, 1)  # random int, either 0 or 1
+        if coin_flip == 0 or arg_headers[i] == "time":
             forced_get_error_side_effect.append(False)
         else:
             forced_get_error_side_effect.append(True)
             expected_result.append(arg_headers[i])
-    mocker.patch.object(cut, 'current_attribute_chunk_get_error', side_effect=forced_get_error_side_effect)
+    mocker.patch.object(
+        cut,
+        "current_attribute_chunk_get_error",
+        side_effect=forced_get_error_side_effect,
+    )

     # Act
     result = cut.frame_diagnosis(arg_frame, arg_headers)
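
Aside for reviewers: taken together, the Kalman test hunks above pin down the plugin's observable contract. The sketch below restates that contract in plain Python as an orientation aid while reading the diff. It is inferred from the assertions alone, not copied from plugins/kalman/kalman_plugin.py: the mean() and residual() bodies, the constructor, and the exact control flow are assumptions; only the call patterns, the 1.5 cutoff, and the time-header exclusion come from the tests themselves.

class KalmanContractSketch:
    """Hypothetical restatement of the behavior the tests assert."""

    def __init__(self, kalman_filter):
        # assumed: any object exposing smooth() and predict(); the tests
        # substitute a MagicMock for this attribute
        self.kf = kalman_filter

    def mean(self, values):
        return sum(values) / len(values)  # assumed helper; mocked in tests

    def residual(self, predicted, actual):
        return abs(actual - predicted)  # assumed helper; mocked in tests

    def predict(self, data, forward_steps, initial_val=None):
        # asserted: inputs are coerced to float, smooth() is seeded with
        # [initial_val, 0] when one is given, kf.predict()'s result returned
        data = [float(d) for d in data]
        if initial_val is not None:
            self.kf.smooth(data, initial_value=[float(initial_val), 0])
        else:
            self.kf.smooth(data)
        return self.kf.predict(data, forward_steps)

    def predictions_for_given_data(self, data):
        # asserted: IndexError on empty input, [0] for a single element,
        # otherwise one one-step prediction per element after the first,
        # each seeded with data[0]
        initial_val = data[0]  # raises IndexError on empty input
        predictions = []
        for i in range(len(data) - 1):
            pred = self.predict(data[0 : i + 1], 1, initial_val)
            predictions.append(pred.observations.mean)
        return predictions if predictions else [0]

    def generate_residuals_for_given_data(self, data):
        # asserted: same call pattern, but each prediction mean is turned
        # into a residual against the sample that actually followed it
        initial_val = data[0]
        residuals = []
        for i in range(len(data) - 1):
            pred = self.predict(data[0 : i + 1], 1, initial_val)
            residuals.append(self.residual(pred.observations.mean, data[i + 1]))
        return residuals if residuals else [0]

    def current_attribute_chunk_get_error(self, data):
        # asserted: one residuals call, one mean call, two abs calls, and a
        # True result at or above the 1.5 cutoff, False below it
        mean_residuals = abs(self.mean(self.generate_residuals_for_given_data(data)))
        return abs(mean_residuals) >= 1.5

    def frame_diagnosis(self, frame, headers):
        # asserted: a header is reported iff its data chunk shows error and
        # it is not a time column ("TIME" and "time" are both skipped)
        broken_attributes = []
        for i in range(len(frame)):
            has_error = self.current_attribute_chunk_get_error(frame[i])
            if has_error and headers[i].upper() != "TIME":
                broken_attributes.append(headers[i])
        return broken_attributes

Note that the tests patch abs at module scope (kalman_plugin.abs), which is why the sketch routes both absolute values through the builtin rather than computing them inline.
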
diff --git a/test/plugins/reporter/test_reporter_plugin.py b/test/plugins/reporter/test_reporter_plugin.py
index f3f4498a..5c540fea 100644
--- a/test/plugins/reporter/test_reporter_plugin.py
+++ b/test/plugins/reporter/test_reporter_plugin.py
@@ -15,83 +15,111 @@
 from plugins.reporter import reporter_plugin
 from plugins.reporter.reporter_plugin import Plugin as Reporter

+
 # test update
-def test_Reporter_update_saves_given_args_and_only_outputs_update_when_not_verbose_mode(mocker):
+def test_Reporter_update_saves_given_args_and_only_outputs_update_when_not_verbose_mode(
+    mocker,
+):
     # Arrange
-    arg_low_level_data = MagicMock(name='arg_low_level_data')
-    arg_high_level_data = MagicMock(name='arg_high_level_data')
+    arg_low_level_data = MagicMock(name="arg_low_level_data")
+    arg_high_level_data = MagicMock(name="arg_high_level_data")

     cut = Reporter.__new__(Reporter)
-    cut.component_name = MagicMock(name='fake.cut.component_name')
+    cut.component_name = MagicMock(name="fake.cut.component_name")

-    mocker.patch(reporter_plugin.__name__ + '.print')
+    mocker.patch(reporter_plugin.__name__ + ".print")

     # Act
     cut.update(arg_low_level_data, arg_high_level_data)

     # Assert
     assert reporter_plugin.print.call_count == 1
-    assert reporter_plugin.print.call_args_list[0].args == (f"{cut.component_name}: UPDATE", )
+    assert reporter_plugin.print.call_args_list[0].args == (
+        f"{cut.component_name}: UPDATE",
+    )
     assert cut.low_level_data == arg_low_level_data
     assert cut.high_level_data == arg_high_level_data

-def test_Reporter_update_saves_given_args_and_outputs_all_info_when_verbose_mode(mocker):
+
+def test_Reporter_update_saves_given_args_and_outputs_all_info_when_verbose_mode(
+    mocker,
+):
     # Arrange
-    arg_low_level_data = MagicMock(name='arg_low_level_data')
-    arg_high_level_data = MagicMock(name='arg_high_level_data')
+    arg_low_level_data = MagicMock(name="arg_low_level_data")
+    arg_high_level_data = MagicMock(name="arg_high_level_data")

     cut = Reporter.__new__(Reporter)
-    cut.component_name = MagicMock(name='fake.cut.component_name')
-    cut.headers = MagicMock(name='fake.cut.headers')
+    cut.component_name = MagicMock(name="fake.cut.component_name")
+    cut.headers = MagicMock(name="fake.cut.headers")
     cut.verbose_mode = True

-    mocker.patch(reporter_plugin.__name__ + '.print')
+    mocker.patch(reporter_plugin.__name__ + ".print")

     # Act
     cut.update(arg_low_level_data, arg_high_level_data)

     # Assert
     assert reporter_plugin.print.call_count == 4
-    assert reporter_plugin.print.call_args_list[0].args == (f"{cut.component_name}: UPDATE", )
-    assert reporter_plugin.print.call_args_list[1].args == (f"  : headers {cut.headers}", )
-    assert reporter_plugin.print.call_args_list[2].args == (f"  : low_level_data {arg_low_level_data.__class__} = '{arg_low_level_data}'", )
-    assert reporter_plugin.print.call_args_list[3].args == (f"  : high_level_data {arg_high_level_data.__class__} = '{arg_high_level_data}'", )
+    assert reporter_plugin.print.call_args_list[0].args == (
+        f"{cut.component_name}: UPDATE",
+    )
+    assert reporter_plugin.print.call_args_list[1].args == (
+        f"  : headers {cut.headers}",
+    )
+    assert reporter_plugin.print.call_args_list[2].args == (
+        f"  : low_level_data {arg_low_level_data.__class__} = '{arg_low_level_data}'",
+    )
+    assert reporter_plugin.print.call_args_list[3].args == (
+        f"  : high_level_data {arg_high_level_data.__class__} = '{arg_high_level_data}'",
+    )
     assert cut.low_level_data == arg_low_level_data
     assert cut.high_level_data == arg_high_level_data

+
 # test render_reasoning
-def test_Reporter_render_reasoning_only_outputs_render_reasoning_when_not_verbose_mode(mocker):
+def test_Reporter_render_reasoning_only_outputs_render_reasoning_when_not_verbose_mode(
+    mocker,
+):
     # Arrange
     cut = Reporter.__new__(Reporter)
-    cut.component_name = MagicMock(name='fake.cut.component_name')
+    cut.component_name = MagicMock(name="fake.cut.component_name")
     cut.verbose_mode = False

-    mocker.patch(reporter_plugin.__name__ + '.print')
+    mocker.patch(reporter_plugin.__name__ + ".print")

     # Act
     cut.render_reasoning()

     # Assert
     assert reporter_plugin.print.call_count == 1
-    assert reporter_plugin.print.call_args_list[0].args == (f"{cut.component_name}: RENDER_REASONING", )
+    assert reporter_plugin.print.call_args_list[0].args == (
+        f"{cut.component_name}: RENDER_REASONING",
+    )
+

 def test_Reporter_render_reasoning_outputs_all_info_when_verbose_mode(mocker):
     # Arrange
     cut = Reporter.__new__(Reporter)
-    cut.component_name = MagicMock(name='fake.cut.component_name')
-    fake_low_level_data = MagicMock(name='fake_low_level_data')
+    cut.component_name = MagicMock(name="fake.cut.component_name")
+    fake_low_level_data = MagicMock(name="fake_low_level_data")
     cut.low_level_data = fake_low_level_data
-    fake_high_level_data = MagicMock(name='fake_high_level_data')
+    fake_high_level_data = MagicMock(name="fake_high_level_data")
     cut.high_level_data = fake_high_level_data
     cut.verbose_mode = True

-    mocker.patch(reporter_plugin.__name__ + '.print')
+    mocker.patch(reporter_plugin.__name__ + ".print")

     # Act
     cut.render_reasoning()

     # Assert
     assert reporter_plugin.print.call_count == 3
-    assert reporter_plugin.print.call_args_list[0].args == (f"{cut.component_name}: RENDER_REASONING", )
-    assert reporter_plugin.print.call_args_list[1].args == (f"  : My low_level_data is {fake_low_level_data}", )
-    assert reporter_plugin.print.call_args_list[2].args == (f"  : My high_level_data is {fake_high_level_data}", )
+    assert reporter_plugin.print.call_args_list[0].args == (
+        f"{cut.component_name}: RENDER_REASONING",
+    )
+    assert reporter_plugin.print.call_args_list[1].args == (
+        f"  : My low_level_data is {fake_low_level_data}",
+    )
+    assert reporter_plugin.print.call_args_list[2].args == (
+        f"  : My high_level_data is {fake_high_level_data}",
+    )
diff --git a/test/test_driver.py b/test/test_driver.py
index adc9a653..711353d2 100644
--- a/test/test_driver.py
+++ b/test/test_driver.py
@@ -11,14 +11,16 @@
 import os
 import unittest

+
 class TestDriver(unittest.TestCase):
     def setUp(self):
-        self.test_path = os.path.dirname(os.path.abspath(__file__)) + '/../../'
+        self.test_path = os.path.dirname(os.path.abspath(__file__)) + "/../../"

     def test_driver(self):
         # os.system('python3 ' + self.test_path + 'driver.py -t' )
-        return 
+        return
+

-if __name__ == '__main__':
+if __name__ == "__main__":
     unittest.main()
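
Likewise, the reporter assertions fix the plugin's console output exactly. A minimal sketch that would satisfy them, assuming verbose_mode gates the detail lines and that component_name, headers, and verbose_mode are set on the instance before use (as the tests do); this is behavior inferred from the tests, not the source of plugins/reporter/reporter_plugin.py:

class ReporterSketch:
    """Hypothetical restatement of the tested reporter behavior."""

    def update(self, low_level_data, high_level_data):
        # asserted: both arguments are stored, UPDATE is always printed,
        # and verbose mode adds headers plus both data dumps (4 prints)
        self.low_level_data = low_level_data
        self.high_level_data = high_level_data
        print(f"{self.component_name}: UPDATE")
        if self.verbose_mode:
            print(f"  : headers {self.headers}")
            print(
                f"  : low_level_data {low_level_data.__class__} = '{low_level_data}'"
            )
            print(
                f"  : high_level_data {high_level_data.__class__} = '{high_level_data}'"
            )

    def render_reasoning(self):
        # asserted: RENDER_REASONING is always printed; verbose mode adds
        # one line each for the stored low- and high-level data (3 prints)
        print(f"{self.component_name}: RENDER_REASONING")
        if self.verbose_mode:
            print(f"  : My low_level_data is {self.low_level_data}")
            print(f"  : My high_level_data is {self.high_level_data}")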