Skip to content

Commit

Permalink
Merge pull request #14 from dnv-opensource/update_dependencies
Browse files Browse the repository at this point in the history
  • Loading branch information
ClaasRostock authored Feb 15, 2024
2 parents 80b2dfd + 7501879 commit 91e7dcf
Show file tree
Hide file tree
Showing 12 changed files with 51 additions and 37 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/_code_quality.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ jobs:
options: '--check --diff'
src: '.'
jupyter: true
version: '==23.12'
version: '==24.1'

ruff:
runs-on: ubuntu-latest
Expand All @@ -27,7 +27,7 @@ jobs:
- name: Install dependencies
run: pip install -r requirements.txt
- name: Install ruff
run: pip install ruff==0.1.8
run: pip install ruff==0.2.1
- name: Run ruff
run: ruff .

Expand All @@ -45,6 +45,6 @@ jobs:
pip install -r requirements.txt
pip install pytest
- name: Install pyright
run: pip install pyright==1.1.338
run: pip install pyright==1.1.350
- name: Run pyright
run: pyright .
9 changes: 9 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,15 @@ The changelog format is based on [Keep a Changelog](https://keepachangelog.com/e

## [Unreleased]

### Dependencies
* updated to black[jupyter]==24.1 (from black[jupyter]==23.12)
* updated to version: '==24.1' (from version: '==23.12')
* updated to ruff==0.2.1 (from ruff==0.1.8)
* updated to pyright==1.1.350 (from pyright==1.1.338)
* updated to sourcery==1.15 (from sourcery==1.14)
* updated to lxml>=5.1 (from lxml>=4.9)
* updated to pandas>=2.2 (from pandas>=2.1)

* -/-

## [0.2.12] - 2024-01-09
Expand Down
Binary file removed doc/cov_driven_resampling/cov_driven_resampling.pptx
Binary file not shown.
12 changes: 7 additions & 5 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,10 @@ exclude = [
"./docs/source/conf.py",
]
src = ["src"]
line-length = 120
target-version = "py39"

[tool.ruff.lint]
ignore = [
"E501", # Line length too long
"D100", # Missing docstring in public module
Expand All @@ -35,7 +39,6 @@ ignore = [
# "N816", # Variable in global scope should not be mixedCase (uncomment if you want to allow mixedCase variable names in global scope)
"N999", # Invalid module name
]
line-length = 120
select = [
"E",
"D",
Expand All @@ -45,19 +48,18 @@ select = [
"I",
"B",
]
target-version = "py39"

[tool.ruff.pep8-naming]
[tool.ruff.lint.pep8-naming]
ignore-names = [
"test_*",
"setUp",
"tearDown",
]

[tool.ruff.pydocstyle]
[tool.ruff.lint.pydocstyle]
convention = "numpy"

[tool.ruff.per-file-ignores]
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["I001"]
"./tests/*" = ["D"]

Expand Down
8 changes: 4 additions & 4 deletions requirements-dev.txt
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
pytest>=7.4
pytest-cov>=4.1
black[jupyter]==23.12
ruff==0.1.8
pyright==1.1.338
black[jupyter]==24.1
ruff==0.2.1
pyright==1.1.350
Sphinx>=7.2
sphinx-argparse-cli>=1.11
myst-parser>=2.0
furo>=2023.9.10
sourcery==1.14
sourcery==1.15

-r requirements.txt
-r requirements-types.txt
2 changes: 1 addition & 1 deletion requirements-types.txt
Original file line number Diff line number Diff line change
@@ -1 +1 @@
types-lxml>=2023.10.21
types-lxml>=5.1
4 changes: 2 additions & 2 deletions requirements.txt
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
lxml>=4.9
lxml>=5.1
numpy>=1.26
pandas>=2.1
pandas>=2.2
matplotlib>=3.8
graphviz>=0.20

Expand Down
4 changes: 2 additions & 2 deletions setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -39,9 +39,9 @@ packages = find:
include_package_data = True
python_requires = >=3.9
install_requires =
lxml>=4.9
lxml>=5.1
numpy>=1.26
pandas>=2.1
pandas>=2.2
matplotlib>=3.8
graphviz>=0.20
dictIO>=0.3.1
Expand Down
4 changes: 3 additions & 1 deletion src/ospx/graph.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,7 +209,9 @@ def _get_edge_label(connection: Connection) -> str:
def _create_table(name: str, child: Union[Dict[str, Any], None] = None) -> str:
_child: Dict[str, Any] = child or {" ": " "}
n_child = len(_child)
string: str = f'<\n<TABLE BORDER="1" CELLBORDER="1" CELLSPACING="0">\n<TR>\n<TD COLSPAN="{2 * n_child:d}">{name}</TD>\n</TR>\n'
string: str = (
f'<\n<TABLE BORDER="1" CELLBORDER="1" CELLSPACING="0">\n<TR>\n<TD COLSPAN="{2 * n_child:d}">{name}</TD>\n</TR>\n'
)
for key, item in _child.items():
string += f"<TR><TD>{key}</TD><TD>{item}</TD></TR>\n"
string += "</TABLE>\n>"
Expand Down
8 changes: 6 additions & 2 deletions src/ospx/importer.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,7 +98,9 @@ def import_system_structure(
logger.error(msg)
raise NotImplementedError(msg)
else:
msg: str = f"Import failed: {system_structure_file.name} contains a connection with unknown connection type '{connection_type}'\n"
msg: str = (
f"Import failed: {system_structure_file.name} contains a connection with unknown connection type '{connection_type}'\n"
)
logger.error(msg)
raise TypeError(msg)

Expand All @@ -117,7 +119,9 @@ def import_system_structure(
logger.error(msg)
raise NotImplementedError(msg)
else:
msg: str = f"Import failed: {system_structure_file.name} contains a connection with unknown endpoint type '{endpoint_type}'\n"
msg: str = (
f"Import failed: {system_structure_file.name} contains a connection with unknown endpoint type '{endpoint_type}'\n"
)
logger.error(msg)
raise TypeError(msg)

Expand Down
8 changes: 4 additions & 4 deletions src/ospx/ospSimulationCase.py
Original file line number Diff line number Diff line change
Expand Up @@ -368,14 +368,14 @@ def write_watch_dict(self):
"simulation": {"name": self.simulation.name},
}

# @TODO: Time, StepCount, conn0, conn1, etc from modelDescription.xml ModelVariables
# should match connectors in caseDict for respective model. Improvement needed.
# FRALUM, 2021-xx-xx
time_column = 0
# Components
for component_name, component in self.system_structure.components.items():
no_of_connectors = len(component.connectors.keys())

# @TODO: Time, StepCount, conn0, conn1, etc from modelDescription.xml ModelVariables
# should match connectors in caseDict for respective model. Improvement needed.
# FRALUM, 2021-xx-xx
time_column = 0
data_columns = [1] + [x + 2 for x in range(no_of_connectors)] # f*** StepCount
watch_dict["datasources"].update({component_name: {"dataColumns": data_columns, "timeColumn": time_column}})

Expand Down
23 changes: 10 additions & 13 deletions src/ospx/watch/watchCosim.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# pyright: reportUnknownMemberType=false
# pyright: reportUnnecessaryTypeIgnoreComment=false
# pyright: reportGeneralTypeIssues=false
# pyright: reportArgumentType=false
# pyright: reportCallIssue=false

import contextlib
import logging
import os
Expand Down Expand Up @@ -273,14 +274,12 @@ def _define_data_source_properties_for_plotting(self):
data_columns: List[int] = []
# read_only_shortlisted_columns: bool = False #flr 2023-11-07 greedy approach needs to be updated on demand

read_only_shortlisted_columns = True if "dataColumns" in data_source_properties else False
if read_only_shortlisted_columns:
# if columns were explicitly specified (shortlisted) in watch dict:
# Read only shortlisted columns.
if "dataColumns" in data_source_properties and isinstance(
data_source_properties["dataColumns"], List
):
data_columns = [int(col) for col in data_source_properties["dataColumns"]]
read_only_shortlisted_columns = "dataColumns" in data_source_properties
if read_only_shortlisted_columns and (
"dataColumns" in data_source_properties
and isinstance(data_source_properties["dataColumns"], List)
):
data_columns = [int(col) for col in data_source_properties["dataColumns"]]
# else: frl 2023-11-07 simx heritage?
# # if columns were not explicitly specified in watch dict:
# # Read all columns except settings.
Expand All @@ -292,9 +291,7 @@ def _define_data_source_properties_for_plotting(self):

_column_names: List[str] = [data_header[column] for column in data_columns]
data_source_properties.update({"colNames": _column_names})
_display_column_names: List[str] = [
pattern.sub("", col_name) for col_name in data_source_properties["colNames"] # type: ignore
]
_display_column_names: List[str] = [pattern.sub("", col_name) for col_name in _column_names]
# _display_column_names = ["Time", "StepCount"] + [
_display_column_names = [
data_source_name + "|" + col_name
Expand Down

0 comments on commit 91e7dcf

Please sign in to comment.