diff --git a/.github/workflows/_code_quality.yml b/.github/workflows/_code_quality.yml
index 2ee82477..ec18376e 100644
--- a/.github/workflows/_code_quality.yml
+++ b/.github/workflows/_code_quality.yml
@@ -13,7 +13,7 @@ jobs:
           options: '--check --diff'
           src: '.'
           jupyter: true
-          version: '==23.12'
+          version: '==24.1'
 
   ruff:
     runs-on: ubuntu-latest
@@ -27,7 +27,7 @@ jobs:
       - name: Install dependencies
        run: pip install -r requirements.txt
       - name: Install ruff
-        run: pip install ruff==0.1.8
+        run: pip install ruff==0.2.1
       - name: Run ruff
        run: ruff .
 
@@ -45,6 +45,6 @@ jobs:
          pip install -r requirements.txt
          pip install pytest
       - name: Install pyright
-        run: pip install pyright==1.1.338
+        run: pip install pyright==1.1.350
       - name: Run pyright
        run: pyright .
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0d514e0d..259ee25b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,15 @@ The changelog format is based on [Keep a Changelog](https://keepachangelog.com/e
 
 ## [Unreleased]
 
+### Dependencies
+* updated to black[jupyter]==24.1 (from black[jupyter]==23.12)
+* updated to version: '==24.1' (from version: '==23.12')
+* updated to ruff==0.2.1 (from ruff==0.1.8)
+* updated to pyright==1.1.350 (from pyright==1.1.338)
+* updated to sourcery==1.15 (from sourcery==1.14)
+* updated to lxml>=5.1 (from lxml>=4.9)
+* updated to pandas>=2.2 (from pandas>=2.1)
+
 * -/-
 
 ## [0.2.12] - 2024-01-09
diff --git a/doc/cov_driven_resampling/cov_driven_resampling.pptx b/doc/cov_driven_resampling/cov_driven_resampling.pptx
deleted file mode 100644
index 71a9978b..00000000
Binary files a/doc/cov_driven_resampling/cov_driven_resampling.pptx and /dev/null differ
diff --git a/pyproject.toml b/pyproject.toml
index c65209a7..0028cd08 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -17,6 +17,10 @@ exclude = [
     "./docs/source/conf.py",
 ]
 src = ["src"]
+line-length = 120
+target-version = "py39"
+
+[tool.ruff.lint]
 ignore = [
     "E501", # Line length too long
     "D100", # Missing docstring in public module
@@ -35,7 +39,6 @@ ignore = [
     # "N816", # Variable in global scope should not be mixedCase (uncomment if you want to allow mixedCase variable names in global scope)
     "N999", # Invalid module name
 ]
-line-length = 120
 select = [
     "E",
     "D",
@@ -45,19 +48,18 @@ select = [
     "I",
     "B",
 ]
-target-version = "py39"
 
-[tool.ruff.pep8-naming]
+[tool.ruff.lint.pep8-naming]
 ignore-names = [
     "test_*",
     "setUp",
     "tearDown",
 ]
 
-[tool.ruff.pydocstyle]
+[tool.ruff.lint.pydocstyle]
 convention = "numpy"
 
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 "__init__.py" = ["I001"]
 "./tests/*" = ["D"]
diff --git a/requirements-dev.txt b/requirements-dev.txt
index a886cb94..c57f0589 100644
--- a/requirements-dev.txt
+++ b/requirements-dev.txt
@@ -1,13 +1,13 @@
 pytest>=7.4
 pytest-cov>=4.1
-black[jupyter]==23.12
-ruff==0.1.8
-pyright==1.1.338
+black[jupyter]==24.1
+ruff==0.2.1
+pyright==1.1.350
 Sphinx>=7.2
 sphinx-argparse-cli>=1.11
 myst-parser>=2.0
 furo>=2023.9.10
-sourcery==1.14
+sourcery==1.15
 -r requirements.txt
 -r requirements-types.txt
diff --git a/requirements-types.txt b/requirements-types.txt
index e4e98ed5..2fdf1d13 100644
--- a/requirements-types.txt
+++ b/requirements-types.txt
@@ -1 +1 @@
-types-lxml>=2023.10.21
+types-lxml>=5.1
diff --git a/requirements.txt b/requirements.txt
index c1bc20a4..eecc5c6d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
-lxml>=4.9
+lxml>=5.1
 numpy>=1.26
-pandas>=2.1
+pandas>=2.2
 matplotlib>=3.8
 graphviz>=0.20
diff --git a/setup.cfg b/setup.cfg
index a8dac3e9..461a0462 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -39,9 +39,9 @@ packages = find:
 include_package_data = True
 python_requires = >=3.9
 install_requires =
-    lxml>=4.9
+    lxml>=5.1
     numpy>=1.26
-    pandas>=2.1
+    pandas>=2.2
     matplotlib>=3.8
     graphviz>=0.20
     dictIO>=0.3.1
diff --git a/src/ospx/graph.py b/src/ospx/graph.py
index 2932c79c..f3ce7d9b 100644
--- a/src/ospx/graph.py
+++ b/src/ospx/graph.py
@@ -209,7 +209,9 @@ def _get_edge_label(connection: Connection) -> str:
 def _create_table(name: str, child: Union[Dict[str, Any], None] = None) -> str:
     _child: Dict[str, Any] = child or {" ": " "}
     n_child = len(_child)
-    string: str = f'<\n<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" CELLPADDING="0">\n<TR>\n<TD COLSPAN="{n_child}">{name}</TD>\n</TR>\n'
+    string: str = (
+        f'<\n<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="0" CELLPADDING="0">\n<TR>\n<TD COLSPAN="{n_child}">{name}</TD>\n</TR>\n'
+    )
     for key, item in _child.items():
         string += f"<TR>\n<TD>{key}</TD>\n<TD>{item}</TD>\n</TR>\n"
     string += "</TABLE>\n>"
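# Note: a minimal usage sketch for _create_table() (src/ospx/graph.py, hunk above).
# The argument values below are illustrative only and not taken from the repository.
from ospx.graph import _create_table

label = _create_table(name="engine", child={"fmu:": "engine.fmu", "stepSize:": "0.01"})
# 'label' is a Graphviz HTML-like node label: it starts with '<', ends with '>', and wraps a
# table with a header row for 'name' plus one key/value row per entry in 'child'.
print(label)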
diff --git a/src/ospx/importer.py b/src/ospx/importer.py
index 8f0da121..d37de8a1 100644
--- a/src/ospx/importer.py
+++ b/src/ospx/importer.py
@@ -98,7 +98,9 @@ def import_system_structure(
             logger.error(msg)
             raise NotImplementedError(msg)
         else:
-            msg: str = f"Import failed: {system_structure_file.name} contains a connection with unknown connection type '{connection_type}'\n"
+            msg: str = (
+                f"Import failed: {system_structure_file.name} contains a connection with unknown connection type '{connection_type}'\n"
+            )
             logger.error(msg)
             raise TypeError(msg)
 
@@ -117,7 +119,9 @@ def import_system_structure(
             logger.error(msg)
             raise NotImplementedError(msg)
         else:
-            msg: str = f"Import failed: {system_structure_file.name} contains a connection with unknown endpoint type '{endpoint_type}'\n"
+            msg: str = (
+                f"Import failed: {system_structure_file.name} contains a connection with unknown endpoint type '{endpoint_type}'\n"
+            )
             logger.error(msg)
             raise TypeError(msg)
diff --git a/src/ospx/ospSimulationCase.py b/src/ospx/ospSimulationCase.py
index caed2261..3e9aa520 100644
--- a/src/ospx/ospSimulationCase.py
+++ b/src/ospx/ospSimulationCase.py
@@ -368,14 +368,14 @@ def write_watch_dict(self):
             "simulation": {"name": self.simulation.name},
         }
 
+        # @TODO: Time, StepCount, conn0, conn1, etc from modelDescription.xml ModelVariables
+        # should match connectors in caseDict for respective model. Improvement needed.
+        # FRALUM, 2021-xx-xx
+
+        time_column = 0
         # Components
         for component_name, component in self.system_structure.components.items():
             no_of_connectors = len(component.connectors.keys())
 
-            # @TODO: Time, StepCount, conn0, conn1, etc from modelDescription.xml ModelVariables
-            # should match connectors in caseDict for respective model. Improvement needed.
-            # FRALUM, 2021-xx-xx
-
-            time_column = 0
             data_columns = [1] + [x + 2 for x in range(no_of_connectors)]  # f*** StepCount
             watch_dict["datasources"].update({component_name: {"dataColumns": data_columns, "timeColumn": time_column}})
diff --git a/src/ospx/watch/watchCosim.py b/src/ospx/watch/watchCosim.py
index a97e9024..f6ad2942 100644
--- a/src/ospx/watch/watchCosim.py
+++ b/src/ospx/watch/watchCosim.py
@@ -1,6 +1,7 @@
 # pyright: reportUnknownMemberType=false
-# pyright: reportUnnecessaryTypeIgnoreComment=false
-# pyright: reportGeneralTypeIssues=false
+# pyright: reportArgumentType=false
+# pyright: reportCallIssue=false
+
 import contextlib
 import logging
 import os
@@ -273,14 +274,12 @@ def _define_data_source_properties_for_plotting(self):
             data_columns: List[int] = []
 
             # read_only_shortlisted_columns: bool = False  #flr 2023-11-07 greedy approach needs to be updated on demand
-            read_only_shortlisted_columns = True if "dataColumns" in data_source_properties else False
-            if read_only_shortlisted_columns:
-                # if columns were explicitely specified (shortlisted) in watch dict:
-                # Read only shortlisted columns.
-                if "dataColumns" in data_source_properties and isinstance(
-                    data_source_properties["dataColumns"], List
-                ):
-                    data_columns = [int(col) for col in data_source_properties["dataColumns"]]
+            read_only_shortlisted_columns = "dataColumns" in data_source_properties
+            if read_only_shortlisted_columns and (
+                "dataColumns" in data_source_properties
+                and isinstance(data_source_properties["dataColumns"], List)
+            ):
+                data_columns = [int(col) for col in data_source_properties["dataColumns"]]
             # else: frl 2023-11-07 simx heritage?
             #     # if columns were not explicitely specified in watch dict:
             #     # Read all columns except settings.
@@ -292,9 +291,7 @@ def _define_data_source_properties_for_plotting(self):
             _column_names: List[str] = [data_header[column] for column in data_columns]
             data_source_properties.update({"colNames": _column_names})
-            _display_column_names: List[str] = [
-                pattern.sub("", col_name) for col_name in data_source_properties["colNames"]  # type: ignore
-            ]
+            _display_column_names: List[str] = [pattern.sub("", col_name) for col_name in _column_names]
 
             # _display_column_names = ["Time", "StepCount"] + [
             _display_column_names = [
                 data_source_name + "|" + col_name
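# Note: a minimal, self-contained sketch of the simplified shortlist check applied above in
# _define_data_source_properties_for_plotting(). The dict content is illustrative only.
data_source_properties = {"dataColumns": [0, 1, 3]}
data_columns = []

# before: read_only_shortlisted_columns = True if "dataColumns" in data_source_properties else False
# after:  the membership test already yields a bool, so it can be assigned directly
read_only_shortlisted_columns = "dataColumns" in data_source_properties

if read_only_shortlisted_columns and isinstance(data_source_properties["dataColumns"], list):
    data_columns = [int(col) for col in data_source_properties["dataColumns"]]

print(read_only_shortlisted_columns, data_columns)  # True [0, 1, 3]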