diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000..d9442ffd
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,8 @@
+
+
+Checklist:
+
+- [ ] I made sure that the CI passes before asking for a review.
+- [ ] I added a summary of the changes (compared to the last release) in the `CHANGELOG.md`.
+- [ ] If necessary, I made changes to the documentation and/or added new content.
+- [ ] I will remember to squash-and-merge, providing a useful summary of the changes in this PR.
diff --git a/.github/workflows/check-links.yml b/.github/workflows/check-links.yml
deleted file mode 100644
index f7e0ce61..00000000
--- a/.github/workflows/check-links.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-name: Check links (manual)
-on: workflow_dispatch
-jobs:
- check_links:
- runs-on: ubuntu-latest
- steps:
- - name: Check out repository
- uses: actions/checkout@v2
- - name: Check links in markdown files (markdown-link-check)
- uses: gaurav-nelson/github-action-markdown-link-check@v1
- with:
- use-quiet-mode: 'yes'
- use-verbose-mode: 'no'
- config-file: '.markdown-link-check-config.json'
diff --git a/.github/workflows/check-markdown.yml b/.github/workflows/check-markdown.yml
deleted file mode 100644
index 39819fb4..00000000
--- a/.github/workflows/check-markdown.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Lint docs
-on: [push, pull_request]
-jobs:
- check_md:
- runs-on: ubuntu-latest
- steps:
- - name: Check out repository
- uses: actions/checkout@v2
- - name: Lint markdown files (markdownlint)
- uses: articulate/actions-markdownlint@v1
- with:
- config: .markdownlint.json
- files: '.'
diff --git a/.github/workflows/check-pep8.yml b/.github/workflows/check-pep8.yml
deleted file mode 100644
index 46ef5313..00000000
--- a/.github/workflows/check-pep8.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: autopep8
-on:
- push:
- branches:
- - main
- - develop
- pull_request:
- branches:
- - "*"
-jobs:
- autopep8:
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v2
- - name: autopep8
- id: autopep8
- uses: peter-evans/autopep8@v1
- with:
- args: --recursive --diff --aggressive --aggressive --exit-code --ignore E402 --max-line-length 120 .
- - name: Fail if autopep8 made changes
- if: ${{ steps.autopep8.outputs.exit-code == 2 }}
- run: exit 1
diff --git a/.github/workflows/pythonpublish.yml b/.github/workflows/pythonpublish.yml
index 275817cd..d2f0faf0 100644
--- a/.github/workflows/pythonpublish.yml
+++ b/.github/workflows/pythonpublish.yml
@@ -15,13 +15,15 @@ jobs:
with:
python-version: '3.x'
- name: Install dependencies
- run: |
- python -m pip install --upgrade pip
- pip install setuptools wheel twine
+ uses: BSFishy/pip-action@v1
+ with:
+ packages: |
+ twine
+ build
- name: Build and publish
env:
TWINE_USERNAME: __token__
TWINE_PASSWORD: ${{ secrets.PYPI_TOKEN }}
run: |
- python setup.py sdist
+ pyproject-build
twine upload dist/*
diff --git a/.github/workflows/run-adaptivity-test.yml b/.github/workflows/run-adaptivity-test.yml
index f708a48e..e949a007 100644
--- a/.github/workflows/run-adaptivity-test.yml
+++ b/.github/workflows/run-adaptivity-test.yml
@@ -7,14 +7,14 @@ on:
pull_request:
branches:
- "*"
-jobs:
+jobs:
adaptivity_integration_tests:
name: Run adaptivity integration tests
runs-on: ubuntu-latest
container: precice/precice:nightly
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -23,26 +23,25 @@ jobs:
run: |
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- python -m pip install --upgrade pip
- pip install setuptools wheel
+ pip3 install --upgrade pip
- name: Install Micro Manager
working-directory: micro-manager
- run: pip3 install --user .
+ run: pip3 install .
- name: Run integration test with local adaptivity
timeout-minutes: 3
working-directory: micro-manager/tests/integration/test_unit_cube
run: |
- export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
- python3 unit_cube.py & python3 run_micro_manager.py --config micro-manager-config-local-adaptivity.json
+ micro-manager-precice micro-manager-config-local-adaptivity.json &
+ python3 unit_cube.py
- name: Run integration test with global adaptivity
timeout-minutes: 3
working-directory: micro-manager/tests/integration/test_unit_cube
run: |
- export LD_LIBRARY_PATH=/usr/local/lib:$LD_LIBRARY_PATH
- python3 unit_cube.py & python3 run_micro_manager.py --config micro-manager-config-global-adaptivity.json
+ micro-manager-precice micro-manager-config-global-adaptivity.json &
+ python3 unit_cube.py
adaptivity_unit_tests_serial:
name: Run adaptivity unit tests in serial
@@ -50,7 +49,7 @@ jobs:
container: precice/precice:nightly
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -59,8 +58,7 @@ jobs:
run: |
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- python -m pip install --upgrade pip
- pip install setuptools wheel
+ pip3 install --upgrade pip
- name: Install Micro Manager
working-directory: micro-manager
@@ -76,7 +74,7 @@ jobs:
container: precice/precice:nightly
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -94,8 +92,7 @@ jobs:
run: |
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- python -m pip install --upgrade pip
- pip install setuptools wheel twine
+ pip3 install --upgrade pip
- name: Install Micro Manager
working-directory: micro-manager
diff --git a/.github/workflows/run-checks.yml b/.github/workflows/run-checks.yml
new file mode 100644
index 00000000..7bc52561
--- /dev/null
+++ b/.github/workflows/run-checks.yml
@@ -0,0 +1,55 @@
+name: Run checks for markdown, links, and pre-commit
+on:
+ push:
+ branches:
+ - main
+ - develop
+ pull_request:
+ branches:
+ - "*"
+jobs:
+ check_md:
+ name: Lint markdown files
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4
+ - name: Lint markdown files (markdownlint)
+ uses: articulate/actions-markdownlint@v1
+ with:
+ config: .markdownlint.json
+ files: '.'
+
+ check_links:
+ name: Check links in markdown files
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out repository
+ uses: actions/checkout@v4
+ - name: Check links in markdown files (markdown-link-check)
+ uses: gaurav-nelson/github-action-markdown-link-check@v1
+ with:
+ use-quiet-mode: 'yes'
+ use-verbose-mode: 'no'
+ config-file: '.markdown-link-check-config.json'
+
+ precommit:
+ name: pre-commit checks
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - name: Setup python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.10'
+ check-latest: true
+ - name: Install pre-commit
+ run: pip install pre-commit
+ - name: Run checks
+ run: pre-commit run -a -v
+ - name: Git status
+ if: always()
+ run: git status
+ - name: Full diff
+ if: always()
+ run: git diff
diff --git a/.github/workflows/run-domain-decomposition-tests.yml b/.github/workflows/run-domain-decomposition-tests.yml
index b44745ac..e2563416 100644
--- a/.github/workflows/run-domain-decomposition-tests.yml
+++ b/.github/workflows/run-domain-decomposition-tests.yml
@@ -7,14 +7,14 @@ on:
pull_request:
branches:
- "*"
-jobs:
+jobs:
domain_decomposition_integration_tests:
name: Run domain decomposition integration tests
runs-on: ubuntu-latest
container: precice/precice:nightly
steps:
- name: Checkout repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -30,23 +30,27 @@ jobs:
- name: Install Dependencies
working-directory: micro-manager
run: |
+ apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- python -m pip install --upgrade pip
- pip install setuptools wheel twine
+ pip3 install --upgrade pip
- name: Install micro-manager
working-directory: micro-manager
- run: pip3 install --user .
+ run: pip3 install .
- - name: Run integration test (variant 1)
+ - name: Run integration test (2 processes)
timeout-minutes: 3
working-directory: micro-manager/tests/integration/test_unit_cube
- run: mpiexec -n 2 --allow-run-as-root python3 run_micro_manager.py --config micro-manager-config-parallel-1.json & python3 unit_cube.py
-
- - name: Run integration test (variant 2)
+ run: |
+ mpiexec -n 2 --allow-run-as-root micro-manager-precice micro-manager-config-parallel-1.json &
+ python3 unit_cube.py
+
+ - name: Run integration test (6 processes)
timeout-minutes: 3
working-directory: micro-manager/tests/integration/test_unit_cube
- run: mpiexec -n 6 --oversubscribe --allow-run-as-root python3 run_micro_manager.py --config micro-manager-config-parallel-2.json & python3 unit_cube.py
+ run: |
+ mpiexec -n 6 --oversubscribe --allow-run-as-root micro-manager-precice micro-manager-config-parallel-2.json &
+ python3 unit_cube.py
domain_decomposition_unit_tests:
name: Run domain decomposition unit tests
@@ -54,7 +58,7 @@ jobs:
container: precice/precice:nightly
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -63,8 +67,7 @@ jobs:
run: |
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
- python -m pip install --upgrade pip
- pip install setuptools wheel
+ pip3 install --upgrade pip
- name: Install Micro Manager
working-directory: micro-manager
diff --git a/.github/workflows/run-macro-micro-dummy.yml b/.github/workflows/run-macro-micro-dummy.yml
index c615fd75..7ed02638 100644
--- a/.github/workflows/run-macro-micro-dummy.yml
+++ b/.github/workflows/run-macro-micro-dummy.yml
@@ -7,7 +7,7 @@ on:
pull_request:
branches:
- "*"
-jobs:
+jobs:
run_dummy:
name: Run dummy
runs-on: ubuntu-latest
@@ -15,7 +15,7 @@ jobs:
steps:
- name: Checkout Repository
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
path: micro-manager
@@ -25,22 +25,26 @@ jobs:
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
apt-get -qq install sudo
- python -m pip install --upgrade pip
- pip install setuptools wheel twine
+ pip3 install --upgrade pip
- name: Install micro-manager
working-directory: micro-manager
- run: pip3 install --user .
+ run: |
+ pip3 install .
- name: Run python macro-micro dummy
timeout-minutes: 3
working-directory: micro-manager/examples
- run: python3 python-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py
+ run: |
+ micro-manager-precice micro-manager-python-config.json &
+ python3 macro_dummy.py
- name: Run adaptive python macro-micro dummy
timeout-minutes: 3
working-directory: micro-manager/examples
- run: python3 python-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py
+ run: |
+ micro-manager-precice micro-manager-python-adaptivity-config.json &
+ python3 macro_dummy.py
- name: Run c++ macro-micro dummy
timeout-minutes: 3
@@ -50,9 +54,12 @@ jobs:
pip install pybind11
c++ -O3 -Wall -shared -std=c++11 -fPIC $(python3 -m pybind11 --includes) micro_cpp_dummy.cpp -o micro_dummy$(python3-config --extension-suffix)
cd ../
- python3 cpp-dummy/run_micro_manager.py --config micro-manager-config.json & python3 macro_dummy.py
+ micro-manager-precice micro-manager-cpp-config.json &
+ python3 macro_dummy.py
- name: Run adaptive c++ macro-micro dummy
timeout-minutes: 3
working-directory: micro-manager/examples
- run: python3 cpp-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json & python3 macro_dummy.py
+ run: |
+ micro-manager-precice micro-manager-cpp-adaptivity-config.json &
+ python3 macro_dummy.py
diff --git a/.github/workflows/run-unit-tests.yml b/.github/workflows/run-unit-tests.yml
index 420f3fd5..f582228a 100644
--- a/.github/workflows/run-unit-tests.yml
+++ b/.github/workflows/run-unit-tests.yml
@@ -12,18 +12,52 @@ jobs:
runs-on: ubuntu-latest
container: precice/precice:nightly
steps:
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
path: micro-manager
- - name: Install Micro Manager and uninstall pyprecice
- working-directory: micro-manager
+ - name: Install dependencies
run: |
apt-get -qq update
apt-get -qq install python3-dev python3-pip git python-is-python3 pkg-config
+ pip3 install --upgrade pip
+
+ - name: Install Micro Manager and run micro_manager unit test
+ working-directory: micro-manager/
+ run: |
+ pip3 install --user .
+ pip3 uninstall -y pyprecice
+ cd tests/unit
+ python3 -m unittest test_micro_manager.py
+
+ - name: Install Micro Manager and run interpolation unit test
+ working-directory: micro-manager/
+ run: |
+ pip3 install --user .[sklearn]
+ pip3 uninstall -y pyprecice
+ cd tests/unit
+ python3 -m unittest test_interpolation.py
+
+ - name: Install Micro Manager and run micro simulation crash unit test
+ working-directory: micro-manager/
+ run: |
pip3 install --user .
pip3 uninstall -y pyprecice
+ cd tests/unit
+ python3 -m unittest test_micro_simulation_crash_handling.py
- - name: Run unit tests
- working-directory: micro-manager/tests/unit
- run: python3 -m unittest test_micro_manager.py
+ - name: Install Micro Manager and run HDF5 read and write unit tests
+ working-directory: micro-manager/
+ run: |
+ pip3 install --user .[snapshot]
+ pip3 uninstall -y pyprecice
+ cd tests/unit
+ python3 -m unittest test_hdf5_functionality.py
+
+ - name: Install Micro Manager and run snapshot_computation unit tests
+ working-directory: micro-manager/
+ run: |
+ pip3 install --user .[snapshot]
+ pip3 uninstall -y pyprecice
+ cd tests/unit
+ python3 -m unittest test_snapshot_computation.py
diff --git a/.markdown-link-check-config.json b/.markdown-link-check-config.json
index 3fff32c2..b539fcdd 100644
--- a/.markdown-link-check-config.json
+++ b/.markdown-link-check-config.json
@@ -1,3 +1,8 @@
{
- "aliveStatusCodes": [429, 200]
-}
\ No newline at end of file
+ "aliveStatusCodes": [429, 200],
+ "ignorePatterns": [
+ {
+ "pattern": "*.html"
+ }
+ ]
+}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 00000000..8e06604a
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,21 @@
+repos:
+- repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v2.3.0
+ hooks:
+ - id: check-xml
+ - id: check-merge-conflict
+ - id: mixed-line-ending
+ - id: end-of-file-fixer
+ - id: trailing-whitespace
+# black repo for python formatting
+- repo: https://github.com/ambv/black
+ rev: 22.12.0
+ hooks:
+ - id: black
+- repo: https://github.com/precice/precice-pre-commit-hooks
+ rev: 'v3.3'
+ hooks:
+ - id: format-precice-config
+ files: "^.*/precice-config.xml"
+ - id: check-image-prefix
+ args: [ --prefix=docs-tooling-micro-manager- ]
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b31f5b64..38816263 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,24 @@
# Micro Manager changelog
+## v0.5.0
+
+- Use absolute values to calculate normalizing factor for relative norms in adaptivity https://github.com/precice/micro-manager/pull/125
+- Add option to use only one micro simulation object in the snapshot computation https://github.com/precice/micro-manager/pull/123
+- Explicitly check if time window has converged using the API function `is_time_window_complete()` https://github.com/precice/micro-manager/pull/118
+- Add `MicroManagerSnapshot` enabling snapshot computation and storage of microdata in HDF5 format https://github.com/precice/micro-manager/pull/101
+- Make `sklearn` an optional dependency
+- Move the config variable `micro_dt` from the coupling parameters section to the simulation parameters section https://github.com/precice/micro-manager/pull/114
+- Set time step of micro simulation in the configuration, and use it in the coupling https://github.com/precice/micro-manager/pull/112
+- Add a base class called `MicroManager` with minimal API and member function definitions, rename the existing `MicroManager` class to `MicroManagerCoupling` https://github.com/precice/micro-manager/pull/111
+- Handle calling `initialize()` function of micro simulations written in languages other than Python https://github.com/precice/micro-manager/pull/110
+- Check if initial data returned from the micro simulation is the data that the adaptivity computation requires https://github.com/precice/micro-manager/pull/109
+- Use executable `micro-manager-precice` by default, and stop using the script `run_micro_manager.py` https://github.com/precice/micro-manager/pull/105
+- Make `initialize()` method of the MicroManager class public https://github.com/precice/micro-manager/pull/105
+- Optionally use initial macro data to initialize micro simulations https://github.com/precice/micro-manager/pull/104
+- Use `pyproject.toml` instead of `setup.py` to configure the build. Package name is now `micro_manager_precice` https://github.com/precice/micro-manager/pull/84
+- Add handling of crashing micro simulations https://github.com/precice/micro-manager/pull/85
+- Add switch to turn adaptivity on and off in configuration https://github.com/precice/micro-manager/pull/93
+
## v0.4.0
- Add note in the cpp-dummy that pickling support does not work due to no good way to pass the sim id to the new micro simulation instance [commit](https://github.com/precice/micro-manager/commit/0a82966676717a533aca9bffa4a110453158f29c)
diff --git a/README.md b/README.md
index aedda6eb..ca71f51f 100644
--- a/README.md
+++ b/README.md
@@ -14,7 +14,7 @@
[![DOI](https://joss.theoj.org/papers/10.21105/joss.05842/status.svg)](https://doi.org/10.21105/joss.05842)
-A tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://www.precice.org/).
+A tool to facilitate solving two-scale (macro-micro) coupled problems using the coupling library [preCICE](https://precice.org/).
The main documentation is rendered on the [preCICE website](https://precice.org/tooling-micro-manager-overview.html).
diff --git a/docs/README.md b/docs/README.md
index 6fee76fc..f919e3e6 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -9,7 +9,7 @@ summary: A tool to manage many micro simulations and couple them to a macro simu
The Micro Manager manages many simulations on a micro scale and couples them to one simulation on a macro scale. For the coupling itself, it heavily relies on the coupling library [preCICE](https://precice.org/index.html).
-![Micro Manager strategy schematic](images/tooling-micro-manager-manager-solution.png)
+![Micro Manager strategy schematic](images/docs-tooling-micro-manager-manager-solution.png)
## What can it do?
@@ -19,11 +19,17 @@ The Micro Manager couples many micro simulations with one macro simulation. This
- ... running micro simulations in parallel using MPI.
- ... adaptively activating and deactivating micro simulations based on a similarity calculation.
+The Micro Manager can also compute snapshots of micro simulations for given macro input parameters, in an offline manner and without preCICE.
+
## Documentation
-To use the Micro Manager for a macro-micro coupling, your micro simulation code needs to be in a library format with a specific class name and functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. To setup a macro-micro coupled simulation using the Micro Manager, follow these steps:
+To use the Micro Manager for a macro-micro coupling, your micro simulation code needs to be in a library format with a specific class name and functions with specific names. For a macro-micro coupled problem, the macro simulation code is coupled to preCICE directly. The section [couple your code](couple-your-code-overview.html) of the preCICE documentation gives more details on coupling existing codes. To set up a macro-micro coupled simulation using the Micro Manager, follow these steps:
- [Installation](tooling-micro-manager-installation.html)
- [Preparing micro simulation](tooling-micro-manager-prepare-micro-simulation.html)
- [Configuration](tooling-micro-manager-configuration.html)
- [Running](tooling-micro-manager-running.html)
+
+To compute snapshots in an offline manner, your simulation code also needs to be in a library format with a specific class name and functions with specific names. To set up a snapshot computation using the Micro Manager, follow these steps:
+
+- [Snapshot computation](tooling-micro-manager-snapshot-configuration.html)
diff --git a/docs/ReleaseGuide.md b/docs/ReleaseGuide.md
index e4333d7e..86c47c62 100644
--- a/docs/ReleaseGuide.md
+++ b/docs/ReleaseGuide.md
@@ -8,7 +8,7 @@ The release of the `micro-manager` repository is made directly from a release br
2. If it is a real release, [open a Pull Request `main` <-- `micro-manager-v1.2.3`](https://github.com/precice/micro-manager/compare/main...main) named after the version (i.e. `Release v1.2.3`) and briefly describe the new features of the release in the PR description.
-3. Bump the version in the `CHANGELOG.md` and in `setup.py` on `micro-manager-v1.2.3`.
+3. Bump the version in the `CHANGELOG.md` on the branch `micro-manager-v1.2.3`.
4. [Draft a new release](https://github.com/precice/micro-manager/releases/new) in the `Releases` section of the repository page in a web browser. The release tag needs to be the exact version number (i.e.`v1.2.3` or `v1.2.3rc1`, compare to [existing tags](https://github.com/precice/micro-manager/tags)). Use `@target:main`. Release title is also the version number (i.e. `v1.2.3` or `v1.2.3rc1`, compare to [existing releases](https://github.com/precice/micro-manager/tags)).
diff --git a/docs/configuration.md b/docs/configuration.md
index 50282950..e7fe0b7e 100644
--- a/docs/configuration.md
+++ b/docs/configuration.md
@@ -20,6 +20,7 @@ The Micro Manager is configured with a JSON file. An example configuration file
},
"simulation_params": {
"macro_domain_bounds": [0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
+ "micro_dt": 1.0
},
"diagnostics": {
"output_micro_sim_solve_time": "True"
@@ -49,6 +50,7 @@ Parameter | Description
`macro_domain_bounds`| Minimum and maximum bounds of the macro-domain, having the format `[xmin, xmax, ymin, ymax, zmin, zmax]` in 3D and `[xmin, xmax, ymin, ymax]` in 2D.
-Domain decomposition parameters | See section on [domain decomposition](#domain-decomposition). But default, the Micro Manager assumes that it will be run in serial.
+Domain decomposition parameters | See section on [domain decomposition](#domain-decomposition). By default, the Micro Manager assumes that it will be run in serial.
Adaptivity parameters | See section on [adaptivity](#adaptivity). By default, adaptivity is disabled.
+`micro_dt` | Initial time window size (dt) of the micro simulation.
## Diagnostics
@@ -107,7 +109,7 @@ The Micro Manager can adaptively control micro simulations. The adaptivity strat
All the adaptivity parameters are chosen from the second publication.
-To turn on adaptivity, the following options need to be set in `simulation_params` under the sub-heading `adaptivity`:
+To turn on adaptivity, set `"adaptivity": "True"` in `simulation_params`. Then, under `adaptivity_settings`, set the following variables:
Parameter | Description
--- | ---
@@ -119,12 +121,13 @@ Parameter | Description
`every_implicit_iteration` | If True, adaptivity is calculated in every implicit iteration. If False, adaptivity is calculated once at the start of the time window and then reused in every implicit time iteration.
`similarity_measure`| Similarity measure to be used for adaptivity. Can be either `L1`, `L2`, `L1rel` or `L2rel`. By default, `L1` is used. The `rel` variants calculate the respective relative norms. This parameter is *optional*.
-Example of adaptivity configuration
+An example adaptivity configuration is
```json
"simulation_params": {
"macro_domain_bounds": [0, 1, 0, 1, 0, 1],
- "adaptivity" {
+ "adaptivity": "True",
+    "adaptivity_settings": {
"type": "local",
"data": ["temperature", "porosity"],
"history_param": 0.5,
@@ -161,6 +164,13 @@ The Micro Manager uses the output functionality of preCICE, hence these data set
```
+## Interpolate a crashed micro simulation
+
+If the optional dependency `sklearn` is installed, the Micro Manager will derive the output of a crashed micro simulation by interpolating outputs from similar simulations. To enable this, set
+`"interpolate_crash": "True"` in the `simulation_params` section of the configuration file.
+
+For more details on the interpolation, see the [crash handling documentation](tooling-micro-manager-running.html#what-happens-when-a-micro-simulation-crashes).
+
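+A minimal sketch of the relevant `simulation_params` section with crash interpolation enabled (the other entries are placeholders taken from the example configuration above):
+
+```json
+"simulation_params": {
+    "macro_domain_bounds": [0.0, 1.0, 0.0, 1.0, 0.0, 1.0],
+    "micro_dt": 1.0,
+    "interpolate_crash": "True"
+}
+```
+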
## Next step
After creating a configuration file you are ready to [run the Micro Manager](tooling-micro-manager-running.html).
diff --git a/docs/images/tooling-micro-manager-manager-solution.png b/docs/images/docs-tooling-micro-manager-manager-solution.png
similarity index 100%
rename from docs/images/tooling-micro-manager-manager-solution.png
rename to docs/images/docs-tooling-micro-manager-manager-solution.png
diff --git a/docs/installation.md b/docs/installation.md
index 03f3f243..5b4e7046 100644
--- a/docs/installation.md
+++ b/docs/installation.md
@@ -17,7 +17,13 @@ The Micro Manager package has the name [micro-manager-precice](https://pypi.org/
pip install --user micro-manager-precice
```
-Unless already installed, the dependencies will be installed by `pip` during the installation procedure. preCICE itself needs to be installed separately. If you encounter problems in the direct installation, see the [dependencies section](#required-dependencies) below.
+Unless already installed, the dependencies will be installed by `pip` during the installation procedure. To enable [crash handling by interpolation](tooling-micro-manager-running.html#what-happens-when-a-micro-simulation-crashes), the optional dependency `sklearn` is required. To install `micro-manager-precice` with `sklearn`, run
+
+```bash
+pip install --user micro-manager-precice[sklearn]
+```
+
+preCICE itself needs to be installed separately. If you encounter problems in the direct installation, see the [dependencies section](#required-dependencies) and the [optional dependencies section](#optional-dependencies) below.
### Option 2: Install manually
@@ -31,6 +37,11 @@ Ensure that the following dependencies are installed:
* [numpy](https://numpy.org/install/)
* [mpi4py](https://mpi4py.readthedocs.io/en/stable/install.html)
+#### Optional dependencies
+
+* [sklearn](https://scikit-learn.org/stable/index.html)
+* [h5py](https://www.h5py.org/) (required for snapshot computations)
+
#### Clone the Micro Manager
```bash
@@ -45,11 +56,7 @@ To install using `pip`, go to the directory `micro-manager/` and run
pip install --user .
```
-To install using Python, go to the project directory `micro-manager/` and run
-
-```bash
-python setup.py install --user
-```
+Adding optional dependencies works as above by adding them after the dot, e.g. `.[sklearn]`.
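+
+For example, to install the local clone together with the optional snapshot dependencies (the `snapshot` extra, as used in the test workflows), run
+
+```bash
+pip install --user .[snapshot]
+```
+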
## Get the latest development version
diff --git a/docs/micro-simulation-convert-to-library.md b/docs/micro-simulation-convert-to-library.md
index ed89d7fd..ba3a4188 100644
--- a/docs/micro-simulation-convert-to-library.md
+++ b/docs/micro-simulation-convert-to-library.md
@@ -28,6 +28,8 @@ class MicroSimulation: # Name is fixed
"""
Initialize the micro simulation and return initial data which will be used in computing adaptivity before the first time step.
+ Defining this function is OPTIONAL.
+
Returns
-------
initial_data : dict
@@ -95,6 +97,10 @@ The `solve()` function should have the following signature:
This will create a shared library `micro_dummy.so` which can be directly imported in Python.
For more information on compiling C++ libraries, see the [pybind11 documentation](https://pybind11.readthedocs.io/en/stable/compiling.html).
+## Initializing micro simulations
+
+Micro simulations can be initialized before the actual coupling starts. To initialize a micro simulation, define an `initialize()` function in the code. The Micro Manager calls the `initialize()` function of every micro simulation. If the macro simulation writes initial data to preCICE, the Micro Manager attempts to pass it to the micro simulation. If the `initialize()` function does not have input parameters, the initial data is not passed. The `initialize()` function can return data to the Micro Manager. This data is only used to compute the adaptivity before the coupling starts. Therefore, if the `initialize()` function returns data, it must be the data that the adaptivity computation expects. A minimal example is sketched below.
+
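+A minimal sketch of an optional `initialize()` method follows. The data names are taken from the solverdummies and are assumptions; whether the function takes an argument depends on whether the macro simulation writes initial data:
+
+```python
+class MicroSimulation:
+    def __init__(self, sim_id):
+        self._sim_id = sim_id
+        self._state = 0.0
+
+    def initialize(self, initial_macro_data):
+        # Called once before the coupling starts. The argument is only
+        # provided if the macro simulation wrote initial data to preCICE.
+        self._state = initial_macro_data["macro-scalar-data"]
+        # Returning data is optional. If data is returned, it must be the
+        # data that the adaptivity computation expects.
+        return {"micro-scalar-data": self._state}
+```
+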
## Next step
After restructuring your micro simulation code into a Python-importable class structure, [configure the Micro Manager](tooling-micro-manager-configuration.html).
diff --git a/docs/running.md b/docs/running.md
index cf0e9dd3..ee6fe6aa 100644
--- a/docs/running.md
+++ b/docs/running.md
@@ -8,21 +8,15 @@ summary: Run the Micro Manager from the terminal with a configuration file as in
The Micro Manager is run directly from the terminal by providing the path to the configuration file as an input argument in the following way
```bash
-micro_manager micro-manager-config.json
+micro-manager-precice micro-manager-config.json
```
-Alternatively the Manager can also be run by creating a Python script which imports the Micro Manager package and calls its run function. For example a run script `run-micro-manager.py` would look like
+The Micro Manager can also be run in parallel in the following way
-```python
-from micro_manager import MicroManager
-
-manager = MicroManager("micro-manager-config.json")
-
-manager.solve()
+```bash
+mpiexec -n <number-of-processes> micro-manager-precice micro-manager-config.json
```
-The Micro Manager can also be run in parallel, using the same script as stated above
+### What happens when a micro simulation crashes?
-```bash
-mpirun -n python3 run-micro-manager.py
-```
+If a micro simulation crashes and the Micro Manager is configured to [interpolate a crashed micro simulation](tooling-micro-manager-configuration.html#interpolate-a-crashed-micro-simulation), the Micro Manager attempts to continue the simulation run. The error message from the micro simulation, along with the macro location, is logged in the Micro Manager log file. Results of the crashed micro simulation are generated by interpolating the results of a number of similar running simulations, using the [inverse distance weighted](https://en.wikipedia.org/wiki/Inverse_distance_weighting) method. If more than 20% of the global micro simulations crash, or if locally no neighbors are available for interpolation, the Micro Manager terminates.
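+
+The sketch below illustrates the general inverse distance weighting idea only; it is not the Micro Manager's internal implementation, and the function name is hypothetical:
+
+```python
+import numpy as np
+
+
+def idw_interpolate(crashed_coord, neighbor_coords, neighbor_outputs):
+    # Weight each neighbor by the inverse of its distance to the crashed
+    # simulation, so that closer simulations contribute more.
+    dists = np.linalg.norm(neighbor_coords - crashed_coord, axis=1)
+    weights = 1.0 / np.maximum(dists, 1e-12)  # guard against zero distance
+    return weights @ neighbor_outputs / np.sum(weights)
+```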
diff --git a/docs/snapshot_configuration.md b/docs/snapshot_configuration.md
new file mode 100644
index 00000000..c2648a6d
--- /dev/null
+++ b/docs/snapshot_configuration.md
@@ -0,0 +1,107 @@
+---
+title: Snapshot Computation
+permalink: tooling-micro-manager-snapshot-configuration.html
+keywords: tooling, macro-micro, two-scale, snapshot
+summary: Set up the Micro Manager snapshot computation.
+---
+
+## Installation
+
+To use the Micro Manager for snapshot computation, the dependency `h5py` is necessary. To install `micro-manager-precice` with `h5py`, run
+
+```bash
+pip install --user micro-manager-precice[snapshot]
+```
+
+If you have already installed `micro-manager-precice`, you can install `h5py` separately by running
+
+```bash
+pip install --user h5py
+```
+
+## Preparation
+
+Prepare your micro simulation for the Micro Manager snapshot computation by following the instructions in the [preparation guide](tooling-micro-manager-prepare-micro-simulation.html).
+
+Note: The `initialize()` method is not supported for the snapshot computation.
+
+## Configuration
+
+Configure the snapshot computation functionality with a JSON file. An example configuration file is
+
+```json
+{
+ "micro_file_name": "python-dummy/micro_dummy",
+ "coupling_params": {
+ "parameter_file_name": "parameter.hdf5",
+ "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
+        "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
+ },
+ "simulation_params": {
+        "micro_dt": 1.0
+ },
+ "snapshot_params": {
+ "post_processing_file_name": "snapshot_postprocessing"
+ },
+ "diagnostics": {
+ "output_micro_sim_solve_time": "True"
+ }
+}
+```
+
+This example configuration file is in [`examples/snapshot-config.json`](https://github.com/precice/micro-manager/tree/develop/examples/snapshot-config.json).
+
+The path to the file containing the Python importable micro simulation class is specified in the `micro_file_name` parameter. If the file is not in the working directory, give the relative path.
+
+There are four main sections in the configuration file: `coupling_params`, `simulation_params`, `snapshot_params`, and the optional `diagnostics`.
+
+## Coupling Parameters
+
+Parameter | Description
+--- | ---
+`parameter_file_name` | Path, relative to the current working directory, of the HDF5 file containing the parameter space. Each macro parameter must be given as a dataset. Macro data belonging to the same micro simulation must have the same index in the first dimension. The dataset names must correspond to the names given in the configuration file.
+`read_data_names` | A Python dictionary with the names of the data to be read from the parameter file as keys and `"scalar"` or `"vector"` as values, depending on the nature of the data.
+`write_data_names` | A Python dictionary with the names of the data to be written to the database as keys and `"scalar"` or `"vector"` as values depending on the nature of the data.
+
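+The parameter file can be created with `h5py`. The following is a minimal sketch; the dataset names match the `read_data_names` of the example configuration and the values are placeholders:
+
+```python
+import h5py
+import numpy as np
+
+n = 10  # number of micro simulations
+
+with h5py.File("parameter.hdf5", "w") as f:
+    # One dataset per macro parameter; the first dimension indexes the
+    # micro simulations, so entries with the same index belong together.
+    f.create_dataset("macro-scalar-data", data=np.arange(n, dtype=float))
+    f.create_dataset("macro-vector-data", data=np.ones((n, 3)))
+```
+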
+## Simulation Parameters
+
+Parameter | Description
+--- | ---
+`micro_dt` | Initial time window size (dt) of the micro simulation. Must be set even if the micro simulation is time-independent.
+
+## Snapshot Parameters
+
+Parameter | Description
+--- | ---
+`post_processing_file_name`| Path to the post-processing Python script, relative to the current working directory. Providing a post-processing script is optional. The script must contain a class `Postprocessing` with a method `postprocessing(sim_output)` that takes the simulation output as an argument. The method can be used to post-process the simulation output before writing it to the database.
+`initialize_once` | If `True`, only one micro simulation per rank is initialized and solved for all macro inputs of that rank. If `False`, a new micro simulation is initialized and solved for each macro input in the parameter space. Default is `False`. This option can be set to `True` if the micro simulation is not history-dependent and the same setup is shared across all micro simulations.
+
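+A `snapshot_params` section using both parameters could look as follows (assuming the string-boolean convention used elsewhere in the configuration):
+
+```json
+"snapshot_params": {
+    "post_processing_file_name": "snapshot_postprocessing",
+    "initialize_once": "True"
+}
+```
+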
+## Diagnostics
+
+Parameter | Description
+--- | ---
+`output_micro_sim_solve_time` | If `True`, the Micro Manager writes the wall clock time of the `solve()` function of each micro simulation to the database.
+
+## Running
+
+Run the snapshot computation directly from the terminal by adding the `--snapshot` argument to the Micro Manager executable, and by providing the path to the configuration file as an input argument in the following way
+
+```bash
+micro-manager-precice --snapshot snapshot-config.json
+```
+
+Run the snapshot computation in parallel in the following way
+
+```bash
+mpiexec -n <number-of-processes> micro-manager-precice --snapshot snapshot-config.json
+```
+
+where `<number-of-processes>` is the number of processes used.
+
+### Results
+
+The results of the snapshot computation are written into `output/` in HDF5 format. Each output datum is stored in a separate dataset. The dataset names correspond to the names specified in the configuration file. The first dimension of the datasets corresponds to the macro parameter index.
+
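+The output can be inspected with `h5py`. A short sketch, in which the output file name is a hypothetical placeholder:
+
+```python
+import h5py
+
+with h5py.File("output/snapshot_data.hdf5", "r") as f:  # hypothetical file name
+    print(list(f.keys()))  # one dataset per output name
+    micro_scalar = f["micro-scalar-data"][:]
+```
+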
+### What happens when a micro simulation crashes during snapshot computation?
+
+If the computation of a snapshot fails, the snapshot is skipped.
diff --git a/examples/README.md b/examples/README.md
index 7aea421b..086270b7 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -4,20 +4,34 @@ The `solverdummies` are minimal working examples for using the preCICE Micro Man
## Python
-To run the Python solverdummies, run the following commands in the `examples/` directory in **two different terminals**:
+To run the Python solverdummies, run the commands given below in the `examples/` directory in **two different terminals**.
+
+First terminal:
```bash
python macro_dummy.py
-python python-dummy/run_micro_manager.py --config micro-manager-config.json
+```
+
+Second terminal:
+
+```bash
+micro-manager-precice micro-manager-python-config.json
```
-Note that running `micro_manager micro-manager-config.json` from the terminal will not work, as the path in the configuration file is relative to the current working directory. See [#36](https://github.com/precice/micro-manager/issues/36) for more information.
+Note that running `micro-manager-precice micro-manager-python-config.json` from a directory other than `examples/` will not work, as the paths in the configuration file are relative to the current working directory. See [#36](https://github.com/precice/micro-manager/issues/36) for more information.
-To run the Python solverdummies with adaptivity run the following commands in the `examples/` directory in **two different terminals**:
+To run the Python solverdummies with adaptivity, run the commands given below in the `examples/` directory in **two different terminals**.
+
+First terminal:
```bash
python macro_dummy.py
-python python-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json
+```
+
+Second terminal:
+
+```bash
+micro-manager-precice micro-manager-python-adaptivity-config.json
```
## C++
@@ -45,18 +59,32 @@ The command above compiles the C++ solverdummy and creates a shared library that
-Then, run the following commands in the `examples/` directory, in **two different terminals**:
+To run the C++ solverdummies, run the commands given below in the `examples/` directory in **two different terminals**.
+
+First terminal:
```bash
python macro_dummy.py
-python cpp-dummy/run_micro_manager.py --config micro-manager-config.json
```
-To run the C++ solverdummies with adaptivity run the following commands in the `examples/` directory in **two different terminals**:
+Second terminal:
+
+```bash
+micro-manager-precice micro-manager-cpp-config.json
+```
+
+To run the C++ solverdummies with adaptivity, run the following commands in the `examples/` directory in **two different terminals**:
+
+First terminal:
```bash
python macro_dummy.py
-python cpp-dummy/run_micro_manager.py --config micro-manager-adaptivity-config.json
+```
+
+Second terminal:
+
+```bash
+micro-manager-precice micro-manager-cpp-adaptivity-config.json
```
When changing the C++ solverdummy to your own solver, make sure to change the `PYBIND11_MODULE` in `micro_cpp_dummy.cpp` to the name that you want to compile to.
diff --git a/examples/clean-example.sh b/examples/clean-example.sh
index aa44870d..d1f7cb98 100755
--- a/examples/clean-example.sh
+++ b/examples/clean-example.sh
@@ -1,5 +1,8 @@
rm -fv *.log
-rm -r -fv precice-run/
+rm -rfv precice-run/
+rm -rfv precice-profiling/
rm -fv *-events.json
rm -fv cpp-dummy/micro-manager.log
rm -fv cpp-dummy/micro_dummy.cpython-310-x86_64-linux-gnu.so
+rm -rfv snapshot-example/output
+rm -fv snapshot-example/*.log
diff --git a/examples/cpp-dummy/.gitignore b/examples/cpp-dummy/.gitignore
index 9604e78e..e181e156 100644
--- a/examples/cpp-dummy/.gitignore
+++ b/examples/cpp-dummy/.gitignore
@@ -8,4 +8,4 @@ precice-run/
__pycache__
# Compiled files
-*.so
\ No newline at end of file
+*.so
diff --git a/examples/cpp-dummy/run_micro_manager.py b/examples/cpp-dummy/run_micro_manager.py
deleted file mode 100644
index 6c7336bd..00000000
--- a/examples/cpp-dummy/run_micro_manager.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Script to run the Micro Manager
-"""
-
-from micro_manager import MicroManager
-from argparse import ArgumentParser
-
-parser = ArgumentParser()
-parser.add_argument("--config", required=True, help="Path to the micro manager configuration file")
-args = parser.parse_args()
-
-manager = MicroManager(args.config)
-
-manager.solve()
diff --git a/examples/macro_dummy.py b/examples/macro_dummy.py
index acb3e56d..8cf5319c 100644
--- a/examples/macro_dummy.py
+++ b/examples/macro_dummy.py
@@ -33,19 +33,27 @@ def main():
vertex_ids = interface.set_mesh_vertices(read_mesh_name, coords)
write_scalar_data = np.zeros(nv)
- write_vector_data = np.zeros((nv, interface.get_data_dimensions(write_mesh_name, "macro-vector-data")))
+ write_vector_data = np.zeros(
+ (nv, interface.get_data_dimensions(write_mesh_name, "macro-vector-data"))
+ )
for i in range(nv):
write_scalar_data[i] = i
- for d in range(interface.get_data_dimensions(write_mesh_name, "macro-vector-data")):
+ for d in range(
+ interface.get_data_dimensions(write_mesh_name, "macro-vector-data")
+ ):
write_vector_data[i, d] = i
if interface.requires_initial_data():
for name, dim in write_data_names.items():
if dim == 0:
- interface.write_data(write_mesh_name, name, vertex_ids, write_scalar_data)
+ interface.write_data(
+ write_mesh_name, name, vertex_ids, write_scalar_data
+ )
elif dim == 1:
- interface.write_data(write_mesh_name, name, vertex_ids, write_vector_data)
+ interface.write_data(
+ write_mesh_name, name, vertex_ids, write_vector_data
+ )
# initialize preCICE
interface.initialize()
@@ -61,13 +69,19 @@ def main():
for name, dim in read_data_names.items():
if dim == 0:
- read_scalar_data = interface.read_data(read_mesh_name, name, vertex_ids, 1)
+ read_scalar_data = interface.read_data(
+ read_mesh_name, name, vertex_ids, 1
+ )
elif dim == 1:
- read_vector_data = interface.read_data(read_mesh_name, name, vertex_ids, 1)
+ read_vector_data = interface.read_data(
+ read_mesh_name, name, vertex_ids, 1
+ )
write_scalar_data[:] = read_scalar_data[:]
for i in range(nv):
- for d in range(interface.get_data_dimensions(read_mesh_name, "micro-vector-data")):
+ for d in range(
+ interface.get_data_dimensions(read_mesh_name, "micro-vector-data")
+ ):
write_vector_data[i, d] = read_vector_data[i, d]
if t > 1: # to trigger adaptivity after some time
# ensure that the data is different from the previous time step
@@ -76,9 +90,13 @@ def main():
for name, dim in write_data_names.items():
if dim == 0:
- interface.write_data(write_mesh_name, name, vertex_ids, write_scalar_data)
+ interface.write_data(
+ write_mesh_name, name, vertex_ids, write_scalar_data
+ )
elif dim == 1:
- interface.write_data(write_mesh_name, name, vertex_ids, write_vector_data)
+ interface.write_data(
+ write_mesh_name, name, vertex_ids, write_vector_data
+ )
# do the coupling
interface.advance(dt)
@@ -96,5 +114,5 @@ def main():
interface.finalize()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/examples/micro-manager-adaptivity-config.json b/examples/micro-manager-cpp-adaptivity-config.json
similarity index 77%
rename from examples/micro-manager-adaptivity-config.json
rename to examples/micro-manager-cpp-adaptivity-config.json
index 76a7fd78..89b424ec 100644
--- a/examples/micro-manager-adaptivity-config.json
+++ b/examples/micro-manager-cpp-adaptivity-config.json
@@ -1,14 +1,16 @@
{
- "micro_file_name": "micro_dummy",
+ "micro_file_name": "cpp-dummy/micro_dummy",
"coupling_params": {
- "config_file_name": "./precice-config-adaptivity.xml",
+ "config_file_name": "precice-config-adaptivity.xml",
"macro_mesh_name": "macro-mesh",
"read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
"write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
},
"simulation_params": {
+ "micro_dt": 1.0,
"macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0],
- "adaptivity": {
+ "adaptivity": "True",
+ "adaptivity_settings": {
"type": "local",
"data": ["macro-scalar-data", "macro-vector-data"],
"history_param": 0.5,
diff --git a/examples/micro-manager-config.json b/examples/micro-manager-cpp-config.json
similarity index 77%
rename from examples/micro-manager-config.json
rename to examples/micro-manager-cpp-config.json
index 4be15a61..d44f70b4 100644
--- a/examples/micro-manager-config.json
+++ b/examples/micro-manager-cpp-config.json
@@ -1,12 +1,13 @@
{
- "micro_file_name": "micro_dummy",
+ "micro_file_name": "cpp-dummy/micro_dummy",
"coupling_params": {
- "config_file_name": "./precice-config.xml",
+ "config_file_name": "precice-config.xml",
"macro_mesh_name": "macro-mesh",
"read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
"write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
},
"simulation_params": {
+ "micro_dt": 1.0,
"macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0]
},
"diagnostics": {
diff --git a/examples/micro-manager-python-adaptivity-config.json b/examples/micro-manager-python-adaptivity-config.json
new file mode 100644
index 00000000..14640faa
--- /dev/null
+++ b/examples/micro-manager-python-adaptivity-config.json
@@ -0,0 +1,25 @@
+{
+ "micro_file_name": "python-dummy/micro_dummy",
+ "coupling_params": {
+ "config_file_name": "precice-config-adaptivity.xml",
+ "macro_mesh_name": "macro-mesh",
+ "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
+ "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
+ },
+ "simulation_params": {
+ "micro_dt": 1.0,
+ "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0],
+ "adaptivity": "True",
+ "adaptivity_settings": {
+ "type": "local",
+ "data": ["macro-scalar-data", "macro-vector-data"],
+ "history_param": 0.5,
+ "coarsening_constant": 0.3,
+ "refining_constant": 0.4,
+ "every_implicit_iteration": "True"
+ }
+ },
+ "diagnostics": {
+ "output_micro_sim_solve_time": "True"
+ }
+}
diff --git a/examples/micro-manager-python-config.json b/examples/micro-manager-python-config.json
new file mode 100644
index 00000000..85f84931
--- /dev/null
+++ b/examples/micro-manager-python-config.json
@@ -0,0 +1,16 @@
+{
+ "micro_file_name": "python-dummy/micro_dummy",
+ "coupling_params": {
+ "config_file_name": "precice-config.xml",
+ "macro_mesh_name": "macro-mesh",
+ "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
+ "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
+ },
+ "simulation_params": {
+ "micro_dt": 1.0,
+ "macro_domain_bounds": [0.0, 25.0, 0.0, 25.0, 0.0, 25.0]
+ },
+ "diagnostics": {
+ "output_micro_sim_solve_time": "True"
+ }
+}
diff --git a/examples/parameter.hdf5 b/examples/parameter.hdf5
new file mode 100644
index 00000000..6f1576e2
Binary files /dev/null and b/examples/parameter.hdf5 differ
diff --git a/examples/precice-config-adaptivity.xml b/examples/precice-config-adaptivity.xml
index 80f35c3f..1e4180b9 100644
--- a/examples/precice-config-adaptivity.xml
+++ b/examples/precice-config-adaptivity.xml
diff --git a/examples/precice-config.xml b/examples/precice-config.xml
index 9bebd403..d89f648f 100644
--- a/examples/precice-config.xml
+++ b/examples/precice-config.xml
diff --git a/examples/python-dummy/micro_dummy.py b/examples/python-dummy/micro_dummy.py
index eeed2984..638e2051 100644
--- a/examples/python-dummy/micro_dummy.py
+++ b/examples/python-dummy/micro_dummy.py
@@ -5,7 +5,6 @@
class MicroSimulation:
-
def __init__(self, sim_id):
"""
Constructor of MicroSimulation class.
@@ -23,8 +22,10 @@ def solve(self, macro_data, dt):
for d in range(self._dims):
self._micro_vector_data.append(macro_data["macro-vector-data"][d] + 1)
- return {"micro-scalar-data": self._micro_scalar_data.copy(),
- "micro-vector-data": self._micro_vector_data.copy()}
+ return {
+ "micro-scalar-data": self._micro_scalar_data.copy(),
+ "micro-vector-data": self._micro_vector_data.copy(),
+ }
def set_state(self, state):
self._state = state
diff --git a/examples/python-dummy/run_micro_manager.py b/examples/python-dummy/run_micro_manager.py
deleted file mode 100644
index 3cffab3a..00000000
--- a/examples/python-dummy/run_micro_manager.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Script to run the Micro Manager
-"""
-
-from micro_manager import MicroManager
-from argparse import ArgumentParser
-
-parser = ArgumentParser()
-parser.add_argument("--config", help="Path to the micro manager configuration file")
-args = parser.parse_args()
-
-manager = MicroManager(args.config)
-
-manager.solve()
diff --git a/examples/snapshot-config.json b/examples/snapshot-config.json
new file mode 100644
index 00000000..6964122b
--- /dev/null
+++ b/examples/snapshot-config.json
@@ -0,0 +1,14 @@
+{
+ "micro_file_name": "python-dummy/micro_dummy",
+ "coupling_params": {
+ "parameter_file_name": "parameter.hdf5",
+ "read_data_names": {"macro-scalar-data": "scalar", "macro-vector-data": "vector"},
+ "write_data_names": {"micro-scalar-data": "scalar", "micro-vector-data": "vector"}
+ },
+ "simulation_params": {
+ "micro_dt": 1.0
+ },
+ "snapshot_params": {
+ "post_processing_file_name": "snapshot_postprocessing"
+ }
+}
diff --git a/examples/snapshot_postprocessing.py b/examples/snapshot_postprocessing.py
new file mode 100644
index 00000000..248cb80f
--- /dev/null
+++ b/examples/snapshot_postprocessing.py
@@ -0,0 +1,24 @@
+"""
+Post-processing
+In this script a post-processing step is defined.
+A script like this can be used to post-process the simulation output before writing it to a file,
+if this is not done in the micro simulation itself.
+"""
+
+
+class Postprocessing:
+    @staticmethod
+    def postprocessing(sim_output):
+ """Post-process the simulation output.
+
+ Parameters
+ ----------
+ sim_output : dict
+ Raw simulation output.
+
+ Returns
+ -------
+ sim_output : dict
+ Post-processed simulation output.
+ """
+ sim_output["micro-scalar-data"] = sim_output["micro-scalar-data"] + 20
+ return sim_output
diff --git a/micro_manager/__init__.py b/micro_manager/__init__.py
index a4d9bf6c..255f162b 100644
--- a/micro_manager/__init__.py
+++ b/micro_manager/__init__.py
@@ -1,2 +1,42 @@
-from .micro_manager import MicroManager
+import argparse
+import os
+
from .config import Config
+from .micro_manager import MicroManagerCoupling
+
+try:
+ from .snapshot.snapshot import MicroManagerSnapshot
+
+ is_snapshot_possible = True
+except ImportError:
+ is_snapshot_possible = False
+
+
+def main():
+
+    parser = argparse.ArgumentParser(description="Run the preCICE Micro Manager.")
+ parser.add_argument(
+ "config_file", type=str, help="Path to the JSON config file of the manager."
+ )
+ parser.add_argument(
+ "--snapshot", action="store_true", help="compute offline snapshot database"
+ )
+
+ args = parser.parse_args()
+ config_file_path = args.config_file
+    if not os.path.isabs(config_file_path):
+        config_file_path = os.path.join(os.getcwd(), config_file_path)
+
+ if not args.snapshot:
+ manager = MicroManagerCoupling(config_file_path)
+ else:
+ if is_snapshot_possible:
+ manager = MicroManagerSnapshot(config_file_path)
+ else:
+ raise ImportError(
+ "The Micro Manager snapshot computation requires the h5py package."
+ )
+
+ manager.initialize()
+
+ manager.solve()
diff --git a/micro_manager/__main__.py b/micro_manager/__main__.py
new file mode 100644
index 00000000..88607486
--- /dev/null
+++ b/micro_manager/__main__.py
@@ -0,0 +1,3 @@
+from micro_manager import main
+
+main()
diff --git a/micro_manager/adaptivity/adaptivity.py b/micro_manager/adaptivity/adaptivity.py
index ae2b9411..b1240505 100644
--- a/micro_manager/adaptivity/adaptivity.py
+++ b/micro_manager/adaptivity/adaptivity.py
@@ -2,11 +2,12 @@
Functionality for adaptive initialization and control of micro simulations
"""
import sys
-import numpy as np
from math import exp
from typing import Callable
from warnings import warn
+import numpy as np
+
class AdaptivityCalculator:
def __init__(self, configurator, logger) -> None:
@@ -30,9 +31,13 @@ def __init__(self, configurator, logger) -> None:
self._coarse_tol = 0.0
self._ref_tol = 0.0
- self._similarity_measure = self._get_similarity_measure(configurator.get_adaptivity_similarity_measure())
+ self._similarity_measure = self._get_similarity_measure(
+ configurator.get_adaptivity_similarity_measure()
+ )
- def _get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: dict) -> np.ndarray:
+ def _get_similarity_dists(
+ self, dt: float, similarity_dists: np.ndarray, data: dict
+ ) -> np.ndarray:
"""
Calculate metric which determines if two micro simulations are similar enough to have one of them deactivated.
@@ -56,7 +61,7 @@ def _get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: d
for name in data.keys():
data_vals = data[name]
if data_vals.ndim == 1:
- # If the adaptivity-data is a scalar for each simulation,
+ # If the adaptivity data is a scalar for each simulation,
# expand the dimension to make it a 2D array to unify the calculation.
# The axis is later reduced with a norm.
data_vals = np.expand_dims(data_vals, axis=1)
@@ -66,9 +71,8 @@ def _get_similarity_dists(self, dt: float, similarity_dists: np.ndarray, data: d
return exp(-self._hist_param * dt) * _similarity_dists + dt * data_diff
def _update_active_sims(
- self,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray) -> np.ndarray:
+ self, similarity_dists: np.ndarray, is_sim_active: np.ndarray
+ ) -> np.ndarray:
"""
Update set of active micro simulations. Active micro simulations are compared to each other
and if found similar, one of them is deactivated.
@@ -88,12 +92,18 @@ def _update_active_sims(
max_similarity_dist = np.amax(similarity_dists)
if max_similarity_dist == 0.0:
- warn("All similarity distances are zero, probably because all the data for adaptivity is the same. Coarsening tolerance will be manually set to minimum float number.")
+ warn(
+ "All similarity distances are zero, probably because all the data for adaptivity is the same. Coarsening tolerance will be manually set to minimum float number."
+ )
self._coarse_tol = sys.float_info.min
else:
- self._coarse_tol = self._coarse_const * self._refine_const * max_similarity_dist
+ self._coarse_tol = (
+ self._coarse_const * self._refine_const * max_similarity_dist
+ )
- _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point
+ _is_sim_active = np.copy(
+ is_sim_active
+        )  # Input is_sim_active is no longer used after this point
# Update the set of active micro sims
for i in range(_is_sim_active.size):
@@ -104,10 +114,11 @@ def _update_active_sims(
return _is_sim_active
def _associate_inactive_to_active(
- self,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray,
- sim_is_associated_to: np.ndarray) -> np.ndarray:
+ self,
+ similarity_dists: np.ndarray,
+ is_sim_active: np.ndarray,
+ sim_is_associated_to: np.ndarray,
+ ) -> np.ndarray:
"""
Associate inactive micro simulations to most similar active micro simulation.
@@ -144,10 +155,8 @@ def _associate_inactive_to_active(
return _sim_is_associated_to
def _check_for_activation(
- self,
- inactive_id: int,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray) -> bool:
+ self, inactive_id: int, similarity_dists: np.ndarray, is_sim_active: np.ndarray
+ ) -> bool:
"""
Check if an inactive simulation needs to be activated.
@@ -173,10 +182,8 @@ def _check_for_activation(
return min(dists) > self._ref_tol
def _check_for_deactivation(
- self,
- active_id: int,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray) -> bool:
+ self, active_id: int, similarity_dists: np.ndarray, is_sim_active: np.ndarray
+ ) -> bool:
"""
Check if an active simulation needs to be deactivated.
@@ -203,7 +210,9 @@ def _check_for_deactivation(
return True
return False
- def _get_similarity_measure(self, similarity_measure: str) -> Callable[[np.ndarray], np.ndarray]:
+ def _get_similarity_measure(
+ self, similarity_measure: str
+ ) -> Callable[[np.ndarray], np.ndarray]:
"""
Get similarity measure to be used for similarity calculation
@@ -217,17 +226,18 @@ def _get_similarity_measure(self, similarity_measure: str) -> Callable[[np.ndarr
similarity_measure : function
Function to be used for similarity calculation. Takes data as input and returns similarity measure
"""
- if similarity_measure == 'L1':
+ if similarity_measure == "L1":
return self._l1
- elif similarity_measure == 'L2':
+ elif similarity_measure == "L2":
return self._l2
- elif similarity_measure == 'L1rel':
+ elif similarity_measure == "L1rel":
return self._l1rel
- elif similarity_measure == 'L2rel':
+ elif similarity_measure == "L2rel":
return self._l2rel
else:
raise ValueError(
- 'Similarity measure not supported. Currently supported similarity measures are "L1", "L2", "L1rel", "L2rel".')
+ 'Similarity measure not supported. Currently supported similarity measures are "L1", "L2", "L1rel", "L2rel".'
+ )
def _l1(self, data: np.ndarray) -> np.ndarray:
"""
@@ -264,7 +274,7 @@ def _l2(self, data: np.ndarray) -> np.ndarray:
def _l1rel(self, data: np.ndarray) -> np.ndarray:
"""
Calculate L1 norm of relative difference of data.
- The relative difference is calculated by dividing the difference of two data points by the maximum of the two data points.
+ The relative difference is calculated by dividing the difference of two data points by the maximum of the absolute value of the two data points.
Parameters
----------
@@ -278,14 +288,21 @@ def _l1rel(self, data: np.ndarray) -> np.ndarray:
"""
pointwise_diff = data[np.newaxis, :] - data[:, np.newaxis]
# divide by data to get relative difference
- # divide i,j by max(data[i],data[j]) to get relative difference
- relative = np.nan_to_num((pointwise_diff / np.maximum(data[np.newaxis, :], data[:, np.newaxis])))
+ # divide i,j by max(abs(data[i]),abs(data[j])) to get relative difference
+ relative = np.nan_to_num(
+ (
+ pointwise_diff
+ / np.maximum(
+ np.absolute(data[np.newaxis, :]), np.absolute(data[:, np.newaxis])
+ )
+ )
+ )
return np.linalg.norm(relative, ord=1, axis=-1)
def _l2rel(self, data: np.ndarray) -> np.ndarray:
"""
Calculate L2 norm of relative difference of data.
- The relative difference is calculated by dividing the difference of two data points by the maximum of the two data points.
+ The relative difference is calculated by dividing the difference of two data points by the maximum of the absolute values of the two data points.
Parameters
----------
@@ -299,6 +316,13 @@ def _l2rel(self, data: np.ndarray) -> np.ndarray:
"""
pointwise_diff = data[np.newaxis, :] - data[:, np.newaxis]
# divide by data to get relative difference
- # divide i,j by max(data[i],data[j]) to get relative difference
- relative = np.nan_to_num((pointwise_diff / np.maximum(data[np.newaxis, :], data[:, np.newaxis])))
+ # divide i,j by max(abs(data[i]),abs(data[j])) to get relative difference
+ relative = np.nan_to_num(
+ (
+ pointwise_diff
+ / np.maximum(
+ np.absolute(data[np.newaxis, :]), np.absolute(data[:, np.newaxis])
+ )
+ )
+ )
return np.linalg.norm(relative, ord=2, axis=-1)
diff --git a/micro_manager/adaptivity/global_adaptivity.py b/micro_manager/adaptivity/global_adaptivity.py
index 155853a7..90690ec7 100644
--- a/micro_manager/adaptivity/global_adaptivity.py
+++ b/micro_manager/adaptivity/global_adaptivity.py
@@ -5,23 +5,26 @@
Note: All ID variables used in the methods of this class are global IDs, unless they have *local* in their name.
"""
-import numpy as np
import hashlib
from copy import deepcopy
-from mpi4py import MPI
from typing import Dict
+
+import numpy as np
+from mpi4py import MPI
+
from .adaptivity import AdaptivityCalculator
class GlobalAdaptivityCalculator(AdaptivityCalculator):
def __init__(
- self,
- configurator,
- logger,
- global_number_of_sims: float,
- global_ids: list,
- rank: int,
- comm) -> None:
+ self,
+ configurator,
+ logger,
+ global_number_of_sims: float,
+ global_ids: list,
+ rank: int,
+ comm,
+ ) -> None:
"""
Class constructor.
@@ -52,7 +55,9 @@ def __init__(
for i in range(local_number_of_sims):
micro_sims_on_this_rank[i] = self._rank
- self._rank_of_sim = np.zeros(global_number_of_sims, dtype=np.intc) # DECLARATION
+ self._rank_of_sim = np.zeros(
+ global_number_of_sims, dtype=np.intc
+ ) # DECLARATION
self._comm.Allgatherv(micro_sims_on_this_rank, self._rank_of_sim)
@@ -62,13 +67,14 @@ def __init__(
self._is_sim_on_this_rank[i] = True
def compute_adaptivity(
- self,
- dt: float,
- micro_sims: list,
- similarity_dists_nm1: np.ndarray,
- is_sim_active_nm1: np.ndarray,
- sim_is_associated_to_nm1: np.ndarray,
- data_for_adaptivity: dict) -> tuple:
+ self,
+ dt: float,
+ micro_sims: list,
+ similarity_dists_nm1: np.ndarray,
+ is_sim_active_nm1: np.ndarray,
+ sim_is_associated_to_nm1: np.ndarray,
+ data_for_adaptivity: dict,
+ ) -> tuple:
"""
Compute adaptivity globally based on similarity distances and micro simulation states
@@ -98,7 +104,9 @@ def compute_adaptivity(
if name not in self._adaptivity_data_names:
raise ValueError(
"Data for adaptivity must be one of the following: {}".format(
- self._adaptivity_data_names.keys()))
+ self._adaptivity_data_names.keys()
+ )
+ )
# Gather adaptivity data from all ranks
global_data_for_adaptivity = dict()
@@ -106,29 +114,39 @@ def compute_adaptivity(
data_as_list = self._comm.allgather(data_for_adaptivity[name])
global_data_for_adaptivity[name] = np.concatenate((data_as_list[:]), axis=0)
- similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, global_data_for_adaptivity)
+ similarity_dists = self._get_similarity_dists(
+ dt, similarity_dists_nm1, global_data_for_adaptivity
+ )
is_sim_active = self._update_active_sims(similarity_dists, is_sim_active_nm1)
is_sim_active, sim_is_associated_to = self._update_inactive_sims(
- similarity_dists, is_sim_active, sim_is_associated_to_nm1, micro_sims)
+ similarity_dists, is_sim_active, sim_is_associated_to_nm1, micro_sims
+ )
sim_is_associated_to = self._associate_inactive_to_active(
- similarity_dists, is_sim_active, sim_is_associated_to)
+ similarity_dists, is_sim_active, sim_is_associated_to
+ )
self._logger.info(
"{} active simulations, {} inactive simulations".format(
np.count_nonzero(
- is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1]),
+ is_sim_active[self._global_ids[0] : self._global_ids[-1] + 1]
+ ),
np.count_nonzero(
- is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1] == False)))
+ is_sim_active[self._global_ids[0] : self._global_ids[-1] + 1]
+ == False
+ ),
+ )
+ )
return similarity_dists, is_sim_active, sim_is_associated_to
def communicate_micro_output(
- self,
- is_sim_active: np.ndarray,
- sim_is_associated_to: np.ndarray,
- micro_output: list) -> None:
+ self,
+ is_sim_active: np.ndarray,
+ sim_is_associated_to: np.ndarray,
+ micro_output: list,
+ ) -> None:
"""
Communicate micro output from active simulations to their associated inactive simulations.
Communication is done process to process (p2p).
@@ -144,9 +162,13 @@ def communicate_micro_output(
micro_output : list
List of dicts having individual output of each simulation. Only the active simulation outputs are entered.
"""
- inactive_local_ids = np.where(is_sim_active[self._global_ids[0]:self._global_ids[-1] + 1] == False)[0]
+ inactive_local_ids = np.where(
+ is_sim_active[self._global_ids[0] : self._global_ids[-1] + 1] == False
+ )[0]
- local_sim_is_associated_to = sim_is_associated_to[self._global_ids[0]:self._global_ids[-1] + 1]
+ local_sim_is_associated_to = sim_is_associated_to[
+ self._global_ids[0] : self._global_ids[-1] + 1
+ ]
# Keys are global IDs of active simulations associated to inactive
# simulations on this rank. Values are global IDs of the inactive
@@ -162,7 +184,9 @@ def communicate_micro_output(
else:
active_to_inactive_map[assoc_active_id] = [i]
else: # If associated active simulation is on this rank, copy the output directly
- micro_output[i] = deepcopy(micro_output[self._global_ids.index(assoc_active_id)])
+ micro_output[i] = deepcopy(
+ micro_output[self._global_ids.index(assoc_active_id)]
+ )
assoc_active_ids = list(active_to_inactive_map.keys())
@@ -175,11 +199,12 @@ def communicate_micro_output(
micro_output[local_id] = deepcopy(output)
def _update_inactive_sims(
- self,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray,
- sim_is_associated_to: np.ndarray,
- micro_sims: list) -> tuple:
+ self,
+ similarity_dists: np.ndarray,
+ is_sim_active: np.ndarray,
+ sim_is_associated_to: np.ndarray,
+ micro_sims: list,
+ ) -> tuple:
"""
Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones
and if it is not similar to any of them, it is activated.
@@ -204,7 +229,9 @@ def _update_inactive_sims(
"""
self._ref_tol = self._refine_const * np.amax(similarity_dists)
- _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point
+ _is_sim_active = np.copy(
+ is_sim_active
+ ) # Input is_sim_active is no longer used after this point
_sim_is_associated_to = np.copy(sim_is_associated_to)
_sim_is_associated_to_updated = np.copy(sim_is_associated_to)
@@ -214,11 +241,15 @@ def _update_inactive_sims(
if not _is_sim_active[i]: # if id is inactive
if self._check_for_activation(i, similarity_dists, _is_sim_active):
_is_sim_active[i] = True
- _sim_is_associated_to_updated[i] = -2 # Active sim cannot have an associated sim
+ _sim_is_associated_to_updated[
+ i
+ ] = -2 # Active sim cannot have an associated sim
if self._is_sim_on_this_rank[i]:
to_be_activated_ids.append(i)
- local_sim_is_associated_to = _sim_is_associated_to[self._global_ids[0]:self._global_ids[-1] + 1]
+ local_sim_is_associated_to = _sim_is_associated_to[
+ self._global_ids[0] : self._global_ids[-1] + 1
+ ]
# Keys are global IDs of active sims not on this rank, values are lists of local and
# global IDs of inactive sims associated to the active sims which are on this rank
@@ -230,20 +261,30 @@ def _update_inactive_sims(
to_be_activated_local_id = self._global_ids.index(i)
assoc_active_id = local_sim_is_associated_to[to_be_activated_local_id]
- if self._is_sim_on_this_rank[assoc_active_id]: # Associated active simulation is on the same rank
+ if self._is_sim_on_this_rank[
+ assoc_active_id
+ ]: # Associated active simulation is on the same rank
assoc_active_local_id = self._global_ids.index(assoc_active_id)
- micro_sims[to_be_activated_local_id].set_state(micro_sims[assoc_active_local_id].get_state())
+ micro_sims[to_be_activated_local_id].set_state(
+ micro_sims[assoc_active_local_id].get_state()
+ )
else: # Associated active simulation is not on this rank
if assoc_active_id in to_be_activated_map:
- to_be_activated_map[assoc_active_id].append(to_be_activated_local_id)
+ to_be_activated_map[assoc_active_id].append(
+ to_be_activated_local_id
+ )
else:
- to_be_activated_map[assoc_active_id] = [to_be_activated_local_id]
+ to_be_activated_map[assoc_active_id] = [
+ to_be_activated_local_id
+ ]
sim_states_and_global_ids = []
for sim in micro_sims:
sim_states_and_global_ids.append((sim.get_state(), sim.get_global_id()))
- recv_reqs = self._p2p_comm(list(to_be_activated_map.keys()), sim_states_and_global_ids)
+ recv_reqs = self._p2p_comm(
+ list(to_be_activated_map.keys()), sim_states_and_global_ids
+ )
# Use received micro sims to activate the required simulations
for req in recv_reqs:
@@ -273,7 +314,9 @@ def _create_tag(self, sim_id: int, src_rank: int, dest_rank: int) -> int:
Unique tag.
"""
send_hashtag = hashlib.sha256()
- send_hashtag.update((str(src_rank) + str(sim_id) + str(dest_rank)).encode('utf-8'))
+ send_hashtag.update(
+ (str(src_rank) + str(sim_id) + str(dest_rank)).encode("utf-8")
+ )
tag = int(send_hashtag.hexdigest()[:6], base=16)
return tag
@@ -294,9 +337,17 @@ def _p2p_comm(self, assoc_active_ids: list, data: list) -> list:
recv_reqs : list
List of MPI requests of receive operations.
"""
- send_map_local: Dict[int, int] = dict() # keys are global IDs, values are rank to send to
- send_map: Dict[int, list] = dict() # keys are global IDs of sims to send, values are ranks to send the sims to
- recv_map: Dict[int, int] = dict() # keys are global IDs to receive, values are ranks to receive from
+ send_map_local: Dict[
+ int, int
+ ] = dict() # keys are global IDs, values are rank to send to
+ send_map: Dict[
+ int, list
+ ] = (
+ dict()
+ ) # keys are global IDs of sims to send, values are ranks to send the sims to
+ recv_map: Dict[
+ int, int
+ ] = dict() # keys are global IDs to receive, values are ranks to receive from
for i in assoc_active_ids:
# Add simulation and its rank to receive map
@@ -328,7 +379,9 @@ def _p2p_comm(self, assoc_active_ids: list, data: list) -> list:
recv_reqs = []
for global_id, recv_rank in recv_map.items():
tag = self._create_tag(global_id, recv_rank, self._rank)
- bufsize = 1 << 30 # allocate and use a temporary 1 MiB buffer size https://github.com/mpi4py/mpi4py/issues/389
+ bufsize = (
+ 1 << 30
+ ) # allocate and use a temporary 1 GiB receive buffer https://github.com/mpi4py/mpi4py/issues/389
req = self._comm.irecv(bufsize, source=recv_rank, tag=tag)
recv_reqs.append(req)
diff --git a/micro_manager/adaptivity/local_adaptivity.py b/micro_manager/adaptivity/local_adaptivity.py
index 3fc45c2a..a2defb8c 100644
--- a/micro_manager/adaptivity/local_adaptivity.py
+++ b/micro_manager/adaptivity/local_adaptivity.py
@@ -4,6 +4,7 @@
each other. A global comparison is not done.
"""
import numpy as np
+
from .adaptivity import AdaptivityCalculator
@@ -22,13 +23,14 @@ def __init__(self, configurator, logger) -> None:
super().__init__(configurator, logger)
def compute_adaptivity(
- self,
- dt,
- micro_sims,
- similarity_dists_nm1: np.ndarray,
- is_sim_active_nm1: np.ndarray,
- sim_is_associated_to_nm1: np.ndarray,
- data_for_adaptivity: dict) -> tuple:
+ self,
+ dt,
+ micro_sims,
+ similarity_dists_nm1: np.ndarray,
+ is_sim_active_nm1: np.ndarray,
+ sim_is_associated_to_nm1: np.ndarray,
+ data_for_adaptivity: dict,
+ ) -> tuple:
"""
Compute adaptivity locally (within a rank).
@@ -59,31 +61,41 @@ def compute_adaptivity(
if name not in self._adaptivity_data_names:
raise ValueError(
"Data for adaptivity must be one of the following: {}".format(
- self._adaptivity_data_names.keys()))
+ self._adaptivity_data_names.keys()
+ )
+ )
- similarity_dists = self._get_similarity_dists(dt, similarity_dists_nm1, data_for_adaptivity)
+ similarity_dists = self._get_similarity_dists(
+ dt, similarity_dists_nm1, data_for_adaptivity
+ )
# Operation done globally if global adaptivity is chosen
is_sim_active = self._update_active_sims(similarity_dists, is_sim_active_nm1)
is_sim_active, sim_is_associated_to = self._update_inactive_sims(
- similarity_dists, is_sim_active, sim_is_associated_to_nm1, micro_sims)
+ similarity_dists, is_sim_active, sim_is_associated_to_nm1, micro_sims
+ )
sim_is_associated_to = self._associate_inactive_to_active(
- similarity_dists, is_sim_active, sim_is_associated_to)
+ similarity_dists, is_sim_active, sim_is_associated_to
+ )
self._logger.info(
"{} active simulations, {} inactive simulations".format(
- np.count_nonzero(is_sim_active), np.count_nonzero(is_sim_active == False)))
+ np.count_nonzero(is_sim_active),
+ np.count_nonzero(is_sim_active == False),
+ )
+ )
return similarity_dists, is_sim_active, sim_is_associated_to
def _update_inactive_sims(
- self,
- similarity_dists: np.ndarray,
- is_sim_active: np.ndarray,
- sim_is_associated_to: np.ndarray,
- micro_sims: list) -> tuple:
+ self,
+ similarity_dists: np.ndarray,
+ is_sim_active: np.ndarray,
+ sim_is_associated_to: np.ndarray,
+ micro_sims: list,
+ ) -> tuple:
"""
Update set of inactive micro simulations. Each inactive micro simulation is compared to all active ones
and if it is not similar to any of them, it is activated.
@@ -108,7 +120,9 @@ def _update_inactive_sims(
"""
self._ref_tol = self._refine_const * np.amax(similarity_dists)
- _is_sim_active = np.copy(is_sim_active) # Input is_sim_active is not longer used after this point
+ _is_sim_active = np.copy(
+ is_sim_active
+ ) # Input is_sim_active is no longer used after this point
_sim_is_associated_to = np.copy(sim_is_associated_to)
# Update the set of inactive micro sims
@@ -116,8 +130,12 @@ def _update_inactive_sims(
if not _is_sim_active[i]: # if id is inactive
if self._check_for_activation(i, similarity_dists, _is_sim_active):
associated_active_local_id = _sim_is_associated_to[i]
- micro_sims[i].set_state(micro_sims[associated_active_local_id].get_state())
+ micro_sims[i].set_state(
+ micro_sims[associated_active_local_id].get_state()
+ )
_is_sim_active[i] = True
- _sim_is_associated_to[i] = -2 # Active sim cannot have an associated sim
+ _sim_is_associated_to[
+ i
+ ] = -2 # Active sim cannot have an associated sim
return _is_sim_active, _sim_is_associated_to
diff --git a/micro_manager/config.py b/micro_manager/config.py
index 897d95f0..b6eb7cd4 100644
--- a/micro_manager/config.py
+++ b/micro_manager/config.py
@@ -30,6 +30,7 @@ def __init__(self, logger, config_filename):
self._macro_mesh_name = None
self._read_data_names = dict()
self._write_data_names = dict()
+ self._micro_dt = None
self._macro_domain_bounds = None
self._ranks_per_axis = None
@@ -38,6 +39,8 @@ def __init__(self, logger, config_filename):
self._output_micro_sim_time = False
+ self._interpolate_crash = False
+
self._adaptivity = False
self._adaptivity_type = "local"
self._data_for_adaptivity = dict()
@@ -47,100 +50,176 @@ def __init__(self, logger, config_filename):
self._adaptivity_every_implicit_iteration = False
self._adaptivity_similarity_measure = "L1"
+ # Snapshot information
+ self._parameter_file_name = None
+ self._postprocessing_file_name = None
+ self._initialize_once = False
+
self.read_json(config_filename)
def read_json(self, config_filename):
"""
- Reads JSON adapter configuration file and saves the data to the respective instance attributes.
+ Reads the JSON configuration file and stores the data for later processing.
Parameters
----------
config_filename : string
Name of the JSON configuration file
"""
- folder = os.path.dirname(os.path.join(os.getcwd(), config_filename))
- path = os.path.join(folder, os.path.basename(config_filename))
+ self._folder = os.path.dirname(os.path.join(os.getcwd(), config_filename))
+ path = os.path.join(self._folder, os.path.basename(config_filename))
with open(path, "r") as read_file:
- data = json.load(read_file)
+ self._data = json.load(read_file)
# convert paths to python-importable paths
- self._micro_file_name = data["micro_file_name"].replace("/", ".").replace("\\", ".").replace(".py", "")
-
- self._config_file_name = os.path.join(folder, data["coupling_params"]["config_file_name"])
- self._macro_mesh_name = data["coupling_params"]["macro_mesh_name"]
+ self._micro_file_name = (
+ self._data["micro_file_name"]
+ .replace("/", ".")
+ .replace("\\", ".")
+ .replace(".py", "")
+ )
try:
- self._write_data_names = data["coupling_params"]["write_data_names"]
- assert isinstance(self._write_data_names, dict), "Write data entry is not a dictionary"
+ self._write_data_names = self._data["coupling_params"]["write_data_names"]
+ assert isinstance(
+ self._write_data_names, dict
+ ), "Write data entry is not a dictionary"
for key, value in self._write_data_names.items():
if value == "scalar":
self._write_data_names[key] = False
elif value == "vector":
self._write_data_names[key] = True
else:
- raise Exception("Write data dictionary as a value other than 'scalar' or 'vector'")
+ raise Exception(
+ "Write data dictionary as a value other than 'scalar' or 'vector'"
+ )
except BaseException:
- self._logger.info("No write data names provided. Micro manager will only read data from preCICE.")
+ self._logger.info(
+ "No write data names provided. Micro manager will only read data from preCICE."
+ )
try:
- self._read_data_names = data["coupling_params"]["read_data_names"]
- assert isinstance(self._read_data_names, dict), "Read data entry is not a dictionary"
+ self._read_data_names = self._data["coupling_params"]["read_data_names"]
+ assert isinstance(
+ self._read_data_names, dict
+ ), "Read data entry is not a dictionary"
for key, value in self._read_data_names.items():
if value == "scalar":
self._read_data_names[key] = False
elif value == "vector":
self._read_data_names[key] = True
else:
- raise Exception("Read data dictionary as a value other than 'scalar' or 'vector'")
+ raise Exception(
+ "Read data dictionary as a value other than 'scalar' or 'vector'"
+ )
+ except BaseException:
+ self._logger.info(
+ "No read data names provided. Micro manager will only write data to preCICE."
+ )
+
+ self._micro_dt = self._data["simulation_params"]["micro_dt"]
+
+ try:
+ if self._data["diagnostics"]["output_micro_sim_solve_time"]:
+ self._output_micro_sim_time = True
+ self._write_data_names["micro_sim_time"] = False
except BaseException:
- self._logger.info("No read data names provided. Micro manager will only write data to preCICE.")
+ self._logger.info(
+ "Micro manager will not output time required to solve each micro simulation in each time step."
+ )
- self._macro_domain_bounds = data["simulation_params"]["macro_domain_bounds"]
+ def read_json_micro_manager(self):
+ """
+ Reads Micro Manager relevant information from JSON configuration file
+ and saves the data to the respective instance attributes.
+ """
+ self._config_file_name = os.path.join(
+ self._folder, self._data["coupling_params"]["config_file_name"]
+ )
+ self._macro_mesh_name = self._data["coupling_params"]["macro_mesh_name"]
+
+ self._macro_domain_bounds = self._data["simulation_params"][
+ "macro_domain_bounds"
+ ]
try:
- self._ranks_per_axis = data["simulation_params"]["decomposition"]
+ self._ranks_per_axis = self._data["simulation_params"]["decomposition"]
except BaseException:
self._logger.info(
- "Domain decomposition is not specified, so the Micro Manager will expect to be run in serial.")
+ "Domain decomposition is not specified, so the Micro Manager will expect to be run in serial."
+ )
try:
- if data["simulation_params"]["adaptivity"]:
+ if self._data["simulation_params"]["adaptivity"] == "True":
self._adaptivity = True
+ if not self._data["simulation_params"]["adaptivity_settings"]:
+ raise Exception(
+ "Adaptivity is turned on but no adaptivity settings are provided."
+ )
else:
self._adaptivity = False
+ if self._data["simulation_params"]["adaptivity_settings"]:
+ raise Exception(
+ "Adaptivity settings are provided but adaptivity is turned off."
+ )
except BaseException:
self._logger.info(
- "Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations in all time steps.")
+ "Micro Manager will not adaptively run micro simulations, but instead will run all micro simulations."
+ )
if self._adaptivity:
- if data["simulation_params"]["adaptivity"]["type"] == "local":
+ if (
+ self._data["simulation_params"]["adaptivity_settings"]["type"]
+ == "local"
+ ):
self._adaptivity_type = "local"
- elif data["simulation_params"]["adaptivity"]["type"] == "global":
+ elif (
+ self._data["simulation_params"]["adaptivity_settings"]["type"]
+ == "global"
+ ):
self._adaptivity_type = "global"
else:
raise Exception("Adaptivity type can be either local or global.")
exchange_data = {**self._read_data_names, **self._write_data_names}
- for dname in data["simulation_params"]["adaptivity"]["data"]:
+ for dname in self._data["simulation_params"]["adaptivity_settings"]["data"]:
self._data_for_adaptivity[dname] = exchange_data[dname]
if self._data_for_adaptivity.keys() == self._write_data_names.keys():
warn(
"Only micro simulation data is used for similarity computation in adaptivity. This would lead to the"
" same set of active and inactive simulations for the entire simulation time. If this is not intended,"
- " please include macro simulation data as well.")
-
- self._adaptivity_history_param = data["simulation_params"]["adaptivity"]["history_param"]
- self._adaptivity_coarsening_constant = data["simulation_params"]["adaptivity"]["coarsening_constant"]
- self._adaptivity_refining_constant = data["simulation_params"]["adaptivity"]["refining_constant"]
-
- if "similarity_measure" in data["simulation_params"]["adaptivity"]:
- self._adaptivity_similarity_measure = data["simulation_params"]["adaptivity"]["similarity_measure"]
+ " please include macro simulation data as well."
+ )
+
+ self._adaptivity_history_param = self._data["simulation_params"][
+ "adaptivity_settings"
+ ]["history_param"]
+ self._adaptivity_coarsening_constant = self._data["simulation_params"][
+ "adaptivity_settings"
+ ]["coarsening_constant"]
+ self._adaptivity_refining_constant = self._data["simulation_params"][
+ "adaptivity_settings"
+ ]["refining_constant"]
+
+ if (
+ "similarity_measure"
+ in self._data["simulation_params"]["adaptivity_settings"]
+ ):
+ self._adaptivity_similarity_measure = self._data["simulation_params"][
+ "adaptivity_settings"
+ ]["similarity_measure"]
else:
- self._logger.info("No similarity measure provided, using L1 norm as default")
+ self._logger.info(
+ "No similarity measure provided, using L1 norm as default"
+ )
self._adaptivity_similarity_measure = "L1"
- adaptivity_every_implicit_iteration = data["simulation_params"]["adaptivity"]["every_implicit_iteration"]
+ adaptivity_every_implicit_iteration = self._data["simulation_params"][
+ "adaptivity_settings"
+ ]["every_implicit_iteration"]
if adaptivity_every_implicit_iteration == "True":
self._adaptivity_every_implicit_iteration = True
@@ -148,38 +227,88 @@ def read_json(self, config_filename):
self._adaptivity_every_implicit_iteration = False
if not self._adaptivity_every_implicit_iteration:
- self._logger.info("Micro Manager will compute adaptivity once at the start of every time window")
+ self._logger.info(
+ "Micro Manager will compute adaptivity once at the start of every time window"
+ )
self._write_data_names["active_state"] = False
self._write_data_names["active_steps"] = False
+ if "interpolate_crash" in self._data["simulation_params"]:
+ if self._data["simulation_params"]["interpolate_crash"] == "True":
+ self._interpolate_crash = True
+
try:
- diagnostics_data_names = data["diagnostics"]["data_from_micro_sims"]
- assert isinstance(diagnostics_data_names, dict), "Diagnostics data is not a dictionary"
+ diagnostics_data_names = self._data["diagnostics"]["data_from_micro_sims"]
+ assert isinstance(
+ diagnostics_data_names, dict
+ ), "Diagnostics data is not a dictionary"
for key, value in diagnostics_data_names.items():
if value == "scalar":
self._write_data_names[key] = False
elif value == "vector":
self._write_data_names[key] = True
else:
- raise Exception("Diagnostics data dictionary as a value other than 'scalar' or 'vector'")
+ raise Exception(
+ "Diagnostics data dictionary as a value other than 'scalar' or 'vector'"
+ )
except BaseException:
- self._logger.info("No diagnostics data is defined. Micro Manager will not output any diagnostics data.")
+ self._logger.info(
+ "No diagnostics data is defined. Micro Manager will not output any diagnostics data."
+ )
try:
- self._micro_output_n = data["diagnostics"]["micro_output_n"]
+ self._micro_output_n = self._data["diagnostics"]["micro_output_n"]
except BaseException:
self._logger.info(
"Output interval of micro simulations not specified, if output is available then it will be called "
- "in every time window.")
+ "in every time window."
+ )
+
+    def read_json_snapshot(self):
+        """Read snapshot-specific information from the JSON configuration file."""
+ self._parameter_file_name = os.path.join(
+ self._folder, self._data["coupling_params"]["parameter_file_name"]
+ )
try:
- if data["diagnostics"]["output_micro_sim_solve_time"]:
- self._output_micro_sim_time = True
- self._write_data_names["micro_sim_time"] = False
+ self._postprocessing_file_name = (
+ self._data["snapshot_params"]["post_processing_file_name"]
+ .replace("/", ".")
+ .replace("\\", ".")
+ .replace(".py", "")
+ )
except BaseException:
self._logger.info(
- "Micro manager will not output time required to solve each micro simulation in each time step.")
+ "No post-processing file name provided. Snapshot computation will not perform any post-processing."
+ )
+ self._postprocessing_file_name = None
+
+ try:
+ diagnostics_data_names = self._data["diagnostics"]["data_from_micro_sims"]
+ assert isinstance(
+ diagnostics_data_names, dict
+ ), "Diagnostics data is not a dictionary"
+ for key, value in diagnostics_data_names.items():
+ if value == "scalar":
+ self._write_data_names[key] = False
+ elif value == "vector":
+ self._write_data_names[key] = True
+ else:
+ raise Exception(
+ "Diagnostics data dictionary has a value other than 'scalar' or 'vector'"
+ )
+ except BaseException:
+ self._logger.info(
+ "No diagnostics data is defined. Snapshot computation will not output any diagnostics data."
+ )
+
+ try:
+ if self._data["snapshot_params"]["initialize_once"] == "True":
+ self._initialize_once = True
+ except BaseException:
+ self._logger.info(
+ "For each snapshot a new micro simulation object will be created"
+ )
def get_config_file_name(self):
"""
@@ -379,3 +508,59 @@ def is_adaptivity_required_in_every_implicit_iteration(self):
True if adaptivity needs to be calculated in every time iteration, False otherwise.
"""
return self._adaptivity_every_implicit_iteration
+
+ def get_micro_dt(self):
+ """
+ Get the size of the micro time window.
+
+ Returns
+ -------
+ micro_time_window : float
+ Size of the micro time window.
+ """
+ return self._micro_dt
+
+ def get_parameter_file_name(self):
+ """
+ Get the name of the parameter file.
+
+ Returns
+ -------
+ parameter_file_name : string
+ Name of the hdf5 file containing the macro parameters.
+ """
+
+ return self._parameter_file_name
+
+ def get_postprocessing_file_name(self):
+ """
+ Get the name of the post-processing script. Depending on user input, snapshot computation performs post-processing for every micro simulation before writing output to a file.
+
+ Returns
+ -------
+ postprocessing : str
+ Name of post-processing script.
+ """
+ return self._postprocessing_file_name
+
+ def interpolate_crashed_micro_sim(self):
+ """
+ Check if user wants crashed micro simulations to be interpolated.
+
+ Returns
+ -------
+ interpolate_crash : bool
+ True if crashed micro simulations need to be interpolated, False otherwise.
+ """
+ return self._interpolate_crash
+
+ def create_single_sim_object(self):
+ """
+ Check if multiple snapshots can be computed on a single micro simulation object.
+
+ Returns
+ -------
+ initialize_once : bool
+ True if initialization is done only once, False otherwise.
+ """
+ return self._initialize_once
diff --git a/micro_manager/domain_decomposition.py b/micro_manager/domain_decomposition.py
index 91fd6bf8..cfc7cbe7 100644
--- a/micro_manager/domain_decomposition.py
+++ b/micro_manager/domain_decomposition.py
@@ -47,12 +47,15 @@ def decompose_macro_domain(self, macro_bounds: list, ranks_per_axis: list) -> li
List containing the upper and lower bounds of the domain pertaining to this rank.
Format is same as input parameter macro_bounds.
"""
- assert np.prod(
- ranks_per_axis) == self._size, "Total number of processors provided in the Micro Manager configuration and in the MPI execution command do not match."
+ assert (
+ np.prod(ranks_per_axis) == self._size
+ ), "Total number of processors provided in the Micro Manager configuration and in the MPI execution command do not match."
dx = []
for d in range(self._dims):
- dx.append(abs(macro_bounds[d * 2 + 1] - macro_bounds[d * 2]) / ranks_per_axis[d])
+ dx.append(
+ abs(macro_bounds[d * 2 + 1] - macro_bounds[d * 2]) / ranks_per_axis[d]
+ )
rank_in_axis: list[int] = [0] * self._dims
if ranks_per_axis[0] == 1: # if serial in x axis
@@ -69,13 +72,18 @@ def decompose_macro_domain(self, macro_bounds: list, ranks_per_axis: list) -> li
if ranks_per_axis[2] == 1: # if serial in z axis
rank_in_axis[2] = 0
else:
- rank_in_axis[2] = int(self._rank / (ranks_per_axis[0] * ranks_per_axis[1])) # z axis
+ rank_in_axis[2] = int(
+ self._rank / (ranks_per_axis[0] * ranks_per_axis[1])
+ ) # z axis
if ranks_per_axis[1] == 1: # if serial in y axis
rank_in_axis[1] = 0
else:
- rank_in_axis[1] = (self._rank - ranks_per_axis[0] * ranks_per_axis[1]
- * rank_in_axis[2]) % ranks_per_axis[2] # y axis
+ rank_in_axis[1] = (
+ self._rank - ranks_per_axis[0] * ranks_per_axis[1] * rank_in_axis[2]
+ ) % ranks_per_axis[
+ 2
+ ] # y axis
mesh_bounds = []
for d in range(self._dims):
diff --git a/micro_manager/interpolation.py b/micro_manager/interpolation.py
new file mode 100644
index 00000000..9fa08d62
--- /dev/null
+++ b/micro_manager/interpolation.py
@@ -0,0 +1,83 @@
+import numpy as np
+from sklearn.neighbors import NearestNeighbors
+
+
+class Interpolation:
+    def __init__(self, logger):
+        """Store the logger used for interpolation diagnostics."""
+        self._logger = logger
+
+ def get_nearest_neighbor_indices(
+ self,
+ coords: np.ndarray,
+ inter_point: np.ndarray,
+ k: int,
+ ) -> np.ndarray:
+ """
+ Get local indices of the k nearest neighbors of a point.
+
+ Parameters
+ ----------
+        coords : np.ndarray
+            Coordinates of all points.
+ inter_point : list | np.ndarray
+ Coordinates of the point for which the neighbors are to be found.
+ k : int
+ Number of neighbors to consider.
+
+ Returns
+        -------
+ neighbor_indices : np.ndarray
+ Local indices of the k nearest neighbors in all local points.
+ """
+ if len(coords) < k:
+ self._logger.info(
+ "Number of desired neighbors k = {} is larger than the number of available neighbors {}. Resetting k = {}.".format(
+ k, len(coords), len(coords)
+ )
+ )
+ k = len(coords)
+ neighbors = NearestNeighbors(n_neighbors=k).fit(coords)
+
+ neighbor_indices = neighbors.kneighbors(
+ [inter_point], return_distance=False
+ ).flatten()
+
+ return neighbor_indices
+
+ def interpolate(self, neighbors: np.ndarray, point: np.ndarray, values):
+ """
+ Interpolate a value at a point using inverse distance weighting. (https://en.wikipedia.org/wiki/Inverse_distance_weighting)
+ .. math::
+ f(x) = (\sum_{i=1}^{n} \frac{f_i}{\Vert x_i - x \Vert^2}) / (\sum_{j=1}^{n} \frac{1}{\Vert x_j - x \Vert^2})
+
+ Parameters
+ ----------
+ neighbors : np.ndarray
+ Coordinates at which the values are known.
+ point : np.ndarray
+ Coordinates at which the value is to be interpolated.
+        values : list
+ Values at the known coordinates.
+
+ Returns
+ -------
+ interpol_val / summed_weights :
+ Value at interpolation point.
+ """
+ interpol_val = 0
+ summed_weights = 0
+ # Iterate over all neighbors
+ for inx in range(len(neighbors)):
+ # Compute the squared norm of the difference between interpolation point and neighbor
+ norm = np.linalg.norm(np.array(neighbors[inx]) - np.array(point)) ** 2
+ # If the interpolation point is already part of the data, it is returned as the interpolation result
+ # This avoids division by zero
+ if norm < 1e-16:
+ return values[inx]
+ # Update interpolation value
+ interpol_val += values[inx] / norm
+ # Extend normalization factor
+ summed_weights += 1 / norm
+
+ return interpol_val / summed_weights
diff --git a/micro_manager/micro_manager.py b/micro_manager/micro_manager.py
index 28ca71a8..7e4ad20e 100644
--- a/micro_manager/micro_manager.py
+++ b/micro_manager/micro_manager.py
@@ -4,35 +4,41 @@
This file provides the class MicroManager, which has the following callable public methods:
- solve
+- initialize
-This file is directly executable as it consists of a main() function. Upon execution, an object of the class MicroManager is created using a given JSON file,
+Upon execution, an object of the class MicroManager is created using a given JSON file,
and the initialize and solve methods are called.
Detailed documentation: https://precice.org/tooling-micro-manager-overview.html
"""
-import argparse
+import importlib
import os
import sys
-import precice
-from mpi4py import MPI
-import numpy as np
-import logging
import time
+import inspect
from copy import deepcopy
from typing import Dict
from warnings import warn
-from .config import Config
-from .micro_simulation import create_simulation_class
-from .adaptivity.local_adaptivity import LocalAdaptivityCalculator
+import numpy as np
+import precice
+
+from .micro_manager_base import MicroManager
from .adaptivity.global_adaptivity import GlobalAdaptivityCalculator
+from .adaptivity.local_adaptivity import LocalAdaptivityCalculator
from .domain_decomposition import DomainDecomposer
+from .micro_simulation import create_simulation_class
+
+try:
+ from .interpolation import Interpolation
+except ImportError:
+ Interpolation = None
sys.path.append(os.getcwd())
-class MicroManager:
+class MicroManagerCoupling(MicroManager):
def __init__(self, config_file: str) -> None:
"""
Constructor.
@@ -42,56 +48,33 @@ def __init__(self, config_file: str) -> None:
config_file : string
Name of the JSON configuration file (provided by the user).
"""
- self._comm = MPI.COMM_WORLD
- self._rank = self._comm.Get_rank()
- self._size = self._comm.Get_size()
-
- self._logger = logging.getLogger(__name__)
- self._logger.setLevel(level=logging.INFO)
-
- # Create file handler which logs messages
- fh = logging.FileHandler('micro-manager.log')
- fh.setLevel(logging.INFO)
-
- # Create formatter and add it to handlers
- formatter = logging.Formatter('[' + str(self._rank) + '] %(name)s - %(levelname)s - %(message)s')
- fh.setFormatter(formatter)
- self._logger.addHandler(fh) # add the handlers to the logger
-
- self._is_parallel = self._size > 1
- self._micro_sims_have_output = False
-
- self._logger.info("Provided configuration file: {}".format(config_file))
- self._config = Config(self._logger, config_file)
-
+ super().__init__(config_file)
+ self._config.read_json_micro_manager()
# Define the preCICE Participant
self._participant = precice.Participant(
- "Micro-Manager",
- self._config.get_config_file_name(),
- self._rank,
- self._size)
-
- micro_file_name = self._config.get_micro_file_name()
+ "Micro-Manager", self._config.get_config_file_name(), self._rank, self._size
+ )
self._macro_mesh_name = self._config.get_macro_mesh_name()
- # Data names of data written to preCICE
- self._write_data_names = self._config.get_write_data_names()
-
- # Data names of data read from preCICE
- self._read_data_names = self._config.get_read_data_names()
-
self._macro_bounds = self._config.get_macro_domain_bounds()
if self._is_parallel: # Simulation is run in parallel
self._ranks_per_axis = self._config.get_ranks_per_axis()
- self._is_micro_solve_time_required = self._config.write_micro_solve_time()
+ # Parameter for interpolation in case of a simulation crash
+ self._interpolate_crashed_sims = self._config.interpolate_crashed_micro_sim()
+ if self._interpolate_crashed_sims:
+ if Interpolation is None:
+ self._logger.info(
+ "Interpolation is turned off as the required package is not installed."
+ )
+ self._interpolate_crashed_sims = False
+ else:
+ # The following parameters can potentially become configurable by the user in the future
+ self._crash_threshold = 0.2
+ self._number_of_nearest_neighbors = 4
- self._local_number_of_sims = 0
- self._global_number_of_sims = 0
- self._is_rank_empty = False
- self._dt = 0
self._mesh_vertex_ids = None # IDs of macro vertices as set by preCICE
self._micro_n_out = self._config.get_micro_output_n()
@@ -115,11 +98,11 @@ def __init__(self, config_file: str) -> None:
if name in self._write_data_names:
self._adaptivity_micro_data_names[name] = is_data_vector
- self._adaptivity_in_every_implicit_step = self._config.is_adaptivity_required_in_every_implicit_iteration()
+ self._adaptivity_in_every_implicit_step = (
+ self._config.is_adaptivity_required_in_every_implicit_iteration()
+ )
self._micro_sims_active_steps = None
- self._initialize()
-
# **************
# Public methods
# **************
@@ -138,24 +121,43 @@ def solve(self) -> None:
sim_is_associated_to_cp = None
sim_states_cp = [None] * self._local_number_of_sims
+ dt = min(self._participant.get_max_time_step_size(), self._micro_dt)
+
if self._is_adaptivity_on:
similarity_dists = np.zeros(
- (self._number_of_sims_for_adaptivity,
- self._number_of_sims_for_adaptivity))
+ (
+ self._number_of_sims_for_adaptivity,
+ self._number_of_sims_for_adaptivity,
+ )
+ )
# Start adaptivity calculation with all sims active
is_sim_active = np.array([True] * self._number_of_sims_for_adaptivity)
# Active sims do not have an associated sim
- sim_is_associated_to = np.full((self._number_of_sims_for_adaptivity), -2, dtype=np.intc)
+ sim_is_associated_to = np.full(
+ (self._number_of_sims_for_adaptivity), -2, dtype=np.intc
+ )
- # If micro simulations have been initialized, compute adaptivity based on initial data
+ # If micro simulations have been initialized, compute adaptivity before starting the coupling
if self._micro_sims_init:
- # Compute adaptivity based on initial data of micro sims
- similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity(
- self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity)
+ (
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ ) = self._adaptivity_controller.compute_adaptivity(
+ dt,
+ self._micro_sims,
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ self._data_for_adaptivity,
+ )
while self._participant.is_coupling_ongoing():
+
+ dt = min(self._participant.get_max_time_step_size(), self._micro_dt)
+
# Write a checkpoint
if self._participant.requires_writing_checkpoint():
for i in range(self._local_number_of_sims):
@@ -165,8 +167,18 @@ def solve(self) -> None:
if self._is_adaptivity_on:
if not self._adaptivity_in_every_implicit_step:
- similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity(
- self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity)
+ (
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ ) = self._adaptivity_controller.compute_adaptivity(
+ dt,
+ self._micro_sims,
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ self._data_for_adaptivity,
+ )
# Only checkpoint the adaptivity configuration if adaptivity is computed
# once in every time window
@@ -178,39 +190,85 @@ def solve(self) -> None:
active_sim_ids = np.where(is_sim_active)[0]
elif self._adaptivity_type == "global":
active_sim_ids = np.where(
- is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0]
+ is_sim_active[
+ self._global_ids_of_local_sims[
+ 0
+ ] : self._global_ids_of_local_sims[-1]
+ + 1
+ ]
+ )[0]
for active_id in active_sim_ids:
self._micro_sims_active_steps[active_id] += 1
- micro_sims_input = self._read_data_from_precice()
+ micro_sims_input = self._read_data_from_precice(dt)
if self._is_adaptivity_on:
if self._adaptivity_in_every_implicit_step:
- similarity_dists, is_sim_active, sim_is_associated_to = self._adaptivity_controller.compute_adaptivity(
- self._dt, self._micro_sims, similarity_dists, is_sim_active, sim_is_associated_to, self._data_for_adaptivity)
+ (
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ ) = self._adaptivity_controller.compute_adaptivity(
+ dt,
+ self._micro_sims,
+ similarity_dists,
+ is_sim_active,
+ sim_is_associated_to,
+ self._data_for_adaptivity,
+ )
if self._adaptivity_type == "local":
active_sim_ids = np.where(is_sim_active)[0]
elif self._adaptivity_type == "global":
active_sim_ids = np.where(
- is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0]
+ is_sim_active[
+ self._global_ids_of_local_sims[
+ 0
+ ] : self._global_ids_of_local_sims[-1]
+ + 1
+ ]
+ )[0]
for active_id in active_sim_ids:
self._micro_sims_active_steps[active_id] += 1
micro_sims_output = self._solve_micro_simulations_with_adaptivity(
- micro_sims_input, is_sim_active, sim_is_associated_to)
+ micro_sims_input, is_sim_active, sim_is_associated_to, dt
+ )
else:
- micro_sims_output = self._solve_micro_simulations(micro_sims_input)
+ micro_sims_output = self._solve_micro_simulations(micro_sims_input, dt)
+
+ # Check if more than a certain percentage of the micro simulations have crashed and terminate if threshold is exceeded
+ if self._interpolate_crashed_sims:
+ crashed_sims_on_all_ranks = np.zeros(self._size, dtype=np.int64)
+ self._comm.Allgather(
+ np.sum(self._has_sim_crashed), crashed_sims_on_all_ranks
+ )
+
+ if self._is_parallel:
+ crash_ratio = (
+ np.sum(crashed_sims_on_all_ranks) / self._global_number_of_sims
+ )
+ else:
+ crash_ratio = np.sum(self._has_sim_crashed) / len(
+ self._has_sim_crashed
+ )
- self._write_data_to_precice(micro_sims_output)
+ if crash_ratio > self._crash_threshold:
+ self._logger.info(
+ "{:.1%} of the micro simulations have crashed exceeding the threshold of {:.1%}. "
+ "Exiting simulation.".format(crash_ratio, self._crash_threshold)
+ )
+ sys.exit()
- self._participant.advance(self._dt)
- self._dt = self._participant.get_max_time_step_size()
+ self._write_data_to_precice(micro_sims_output)
- t += self._dt
- n += 1
+ t += dt # increase internal time when time step is done.
+ n += 1 # increase counter
+ self._participant.advance(
+ dt
+ ) # notify preCICE that time step of size dt is complete
# Revert micro simulations to their last checkpoints if required
if self._participant.requires_reading_checkpoint():
@@ -226,9 +284,16 @@ def solve(self) -> None:
is_sim_active = np.copy(is_sim_active_cp)
sim_is_associated_to = np.copy(sim_is_associated_to_cp)
- else: # Time window has converged, now micro output can be generated
- self._logger.info("Micro simulations {} - {} have converged at t = {}".format(
- self._micro_sims[0].get_global_id(), self._micro_sims[-1].get_global_id(), t))
+ if (
+ self._participant.is_time_window_complete()
+ ): # Time window has converged, now micro output can be generated
+ self._logger.info(
+ "Micro simulations {} - {} have converged at t = {}".format(
+ self._micro_sims[0].get_global_id(),
+ self._micro_sims[-1].get_global_id(),
+ t,
+ )
+ )
if self._micro_sims_have_output:
if n % self._micro_n_out == 0:
@@ -237,11 +302,7 @@ def solve(self) -> None:
self._participant.finalize()
- # ***************
- # Private methods
- # ***************
-
- def _initialize(self) -> None:
+ def initialize(self) -> None:
"""
Initialize the Micro Manager by performing the following tasks:
- Decompose the domain if the Micro Manager is executed in parallel.
@@ -252,31 +313,46 @@ def _initialize(self) -> None:
"""
# Decompose the macro-domain and set the mesh access region for each partition in preCICE
assert len(self._macro_bounds) / 2 == self._participant.get_mesh_dimensions(
- self._macro_mesh_name), "Provided macro mesh bounds are of incorrect dimension"
+ self._macro_mesh_name
+ ), "Provided macro mesh bounds are of incorrect dimension"
if self._is_parallel:
domain_decomposer = DomainDecomposer(
- self._logger, self._participant.get_mesh_dimensions(self._macro_mesh_name), self._rank, self._size)
- coupling_mesh_bounds = domain_decomposer.decompose_macro_domain(self._macro_bounds, self._ranks_per_axis)
+ self._logger,
+ self._participant.get_mesh_dimensions(self._macro_mesh_name),
+ self._rank,
+ self._size,
+ )
+ coupling_mesh_bounds = domain_decomposer.decompose_macro_domain(
+ self._macro_bounds, self._ranks_per_axis
+ )
else:
coupling_mesh_bounds = self._macro_bounds
- self._participant.set_mesh_access_region(self._macro_mesh_name, coupling_mesh_bounds)
+ self._participant.set_mesh_access_region(
+ self._macro_mesh_name, coupling_mesh_bounds
+ )
# initialize preCICE
self._participant.initialize()
- self._mesh_vertex_ids, mesh_vertex_coords = self._participant.get_mesh_vertex_ids_and_coordinates(
- self._macro_mesh_name)
- assert (mesh_vertex_coords.size != 0), "Macro mesh has no vertices."
+ (
+ self._mesh_vertex_ids,
+ self._mesh_vertex_coords,
+ ) = self._participant.get_mesh_vertex_ids_and_coordinates(self._macro_mesh_name)
+ assert self._mesh_vertex_coords.size != 0, "Macro mesh has no vertices."
- self._local_number_of_sims, _ = mesh_vertex_coords.shape
- self._logger.info("Number of local micro simulations = {}".format(self._local_number_of_sims))
+ self._local_number_of_sims, _ = self._mesh_vertex_coords.shape
+ self._logger.info(
+ "Number of local micro simulations = {}".format(self._local_number_of_sims)
+ )
if self._local_number_of_sims == 0:
if self._is_parallel:
self._logger.info(
"Rank {} has no micro simulations and hence will not do any computation.".format(
- self._rank))
+ self._rank
+ )
+ )
self._is_rank_empty = True
else:
raise Exception("Micro Manager has no micro simulations.")
@@ -294,13 +370,20 @@ def _initialize(self) -> None:
for name, is_data_vector in self._adaptivity_data_names.items():
if is_data_vector:
self._data_for_adaptivity[name] = np.zeros(
- (self._local_number_of_sims, self._participant.get_data_dimensions(
- self._macro_mesh_name, name)))
+ (
+ self._local_number_of_sims,
+ self._participant.get_data_dimensions(
+ self._macro_mesh_name, name
+ ),
+ )
+ )
else:
- self._data_for_adaptivity[name] = np.zeros((self._local_number_of_sims))
+ self._data_for_adaptivity[name] = np.zeros(
+ (self._local_number_of_sims)
+ )
# Create lists of local and global IDs
- sim_id = np.sum(nms_all_ranks[:self._rank])
+ sim_id = np.sum(nms_all_ranks[: self._rank])
self._global_ids_of_local_sims = [] # DECLARATION
for i in range(self._local_number_of_sims):
self._global_ids_of_local_sims.append(sim_id)
@@ -308,24 +391,35 @@ def _initialize(self) -> None:
self._micro_sims = [None] * self._local_number_of_sims # DECLARATION
+ # Setup for simulation crashes
+ self._has_sim_crashed = [False] * self._local_number_of_sims
+ if self._interpolate_crashed_sims:
+ self._interpolant = Interpolation(self._logger)
+
micro_problem = getattr(
- __import__(
- self._config.get_micro_file_name(),
- fromlist=["MicroSimulation"]),
- "MicroSimulation")
+ importlib.import_module(
+ self._config.get_micro_file_name(), "MicroSimulation"
+ ),
+ "MicroSimulation",
+ )
# Create micro simulation objects
for i in range(self._local_number_of_sims):
- self._micro_sims[i] = create_simulation_class(
- micro_problem)(self._global_ids_of_local_sims[i])
+ self._micro_sims[i] = create_simulation_class(micro_problem)(
+ self._global_ids_of_local_sims[i]
+ )
- self._logger.info("Micro simulations with global IDs {} - {} created.".format(
- self._global_ids_of_local_sims[0], self._global_ids_of_local_sims[-1]))
+ self._logger.info(
+ "Micro simulations with global IDs {} - {} created.".format(
+ self._global_ids_of_local_sims[0], self._global_ids_of_local_sims[-1]
+ )
+ )
if self._is_adaptivity_on:
if self._adaptivity_type == "local":
self._adaptivity_controller = LocalAdaptivityCalculator(
- self._config, self._logger)
+ self._config, self._logger
+ )
self._number_of_sims_for_adaptivity = self._local_number_of_sims
elif self._adaptivity_type == "global":
self._adaptivity_controller = GlobalAdaptivityCalculator(
@@ -334,46 +428,157 @@ def _initialize(self) -> None:
self._global_number_of_sims,
self._global_ids_of_local_sims,
self._rank,
- self._comm)
+ self._comm,
+ )
self._number_of_sims_for_adaptivity = self._global_number_of_sims
self._micro_sims_active_steps = np.zeros(self._local_number_of_sims)
self._micro_sims_init = False # DECLARATION
+ # Read initial data from preCICE, if it is available
+ initial_data = self._read_data_from_precice(dt=0)
+
+        is_initial_data_available = bool(initial_data)
+
+ # Boolean which states if the initialize() method of the micro simulation requires initial data
+ is_initial_data_required = False
+
+ # Check if provided micro simulation has an initialize() method
+ if hasattr(micro_problem, "initialize") and callable(
+ getattr(micro_problem, "initialize")
+ ):
+ self._micro_sims_init = True # Starting value before setting
+
+ try: # Try to get the signature of the initialize() method, if it is written in Python
+ argspec = inspect.getfullargspec(micro_problem.initialize)
+ if (
+ len(argspec.args) == 1
+ ): # The first argument in the signature is self
+ is_initial_data_required = False
+ elif len(argspec.args) == 2:
+ is_initial_data_required = True
+ else:
+ raise Exception(
+ "The initialize() method of the Micro simulation has an incorrect number of arguments."
+ )
+ except TypeError:
+ self._logger.info(
+ "The signature of initialize() method of the micro simulation cannot be determined. Trying to determine the signature by calling the method."
+ )
+ # Try to get the signature of the initialize() method, if it is not written in Python
+ try: # Try to call the initialize() method without initial data
+ self._micro_sims[0].initialize()
+ is_initial_data_required = False
+ except TypeError:
+ self._logger.info(
+ "The initialize() method of the micro simulation has arguments. Attempting to call it again with initial data."
+ )
+ try: # Try to call the initialize() method with initial data
+ self._micro_sims[0].initialize(initial_data[0])
+ is_initial_data_required = True
+ except TypeError:
+ raise Exception(
+ "The initialize() method of the Micro simulation has an incorrect number of arguments."
+ )
+
+ if is_initial_data_required and not is_initial_data_available:
+ raise Exception(
+ "The initialize() method of the Micro simulation requires initial data, but no initial data has been provided."
+ )
+
+ if not is_initial_data_required and is_initial_data_available:
+ warn(
+ "The initialize() method is only allowed to return data which is required for the adaptivity calculation."
+ )
+
# Get initial data from micro simulations if initialize() method exists
- if hasattr(micro_problem, 'initialize') and callable(getattr(micro_problem, 'initialize')):
- if self._is_adaptivity_on:
- self._micro_sims_init = True
- initial_micro_output = self._micro_sims[0].initialize() # Call initialize() of the first simulation
- if initial_micro_output is None: # Check if the detected initialize() method returns any data
- warn("The initialize() call of the Micro simulation has not returned any initial data."
- " The initialize call is stopped.")
- self._micro_sims_init = False
+ if self._micro_sims_init:
+
+ # Call initialize() method of the micro simulation to check if it returns any initial data
+ if is_initial_data_required:
+ initial_micro_output = self._micro_sims[0].initialize(initial_data[0])
+ else:
+ initial_micro_output = self._micro_sims[0].initialize()
+
+ if (
+ initial_micro_output is None
+ ): # Check if the detected initialize() method returns any data
+ warn(
+ "The initialize() call of the Micro simulation has not returned any initial data."
+ " This means that the initialize() call has no effect on the adaptivity. The initialize method will nevertheless still be called."
+ )
+ self._micro_sims_init = False
+
+ if is_initial_data_required:
+ for i in range(1, self._local_number_of_sims):
+ self._micro_sims[i].initialize(initial_data[i])
else:
+ for i in range(1, self._local_number_of_sims):
+ self._micro_sims[i].initialize()
+ else: # Case where the initialize() method returns data
+ if self._is_adaptivity_on:
# Save initial data from first micro simulation as we anyway have it
for name in initial_micro_output.keys():
- self._data_for_adaptivity[name][0] = initial_micro_output[name]
+ if name in self._data_for_adaptivity:
+ self._data_for_adaptivity[name][0] = initial_micro_output[
+ name
+ ]
+ else:
+ raise Exception(
+ "The initialize() method needs to return data which is required for the adaptivity calculation."
+ )
# Gather initial data from the rest of the micro simulations
- for i in range(1, self._local_number_of_sims):
- initial_micro_output = self._micro_sims[i].initialize()
- for name in self._adaptivity_micro_data_names:
- self._data_for_adaptivity[name][i] = initial_micro_output[name]
- else:
- self._logger.info(
- "Micro simulation has the method initialize(), but it is not called, because adaptivity is off.")
+ if is_initial_data_required:
+ for i in range(1, self._local_number_of_sims):
+ initial_micro_output = self._micro_sims[i].initialize(
+ initial_data[i]
+ )
+ for name in self._adaptivity_micro_data_names:
+ self._data_for_adaptivity[name][
+ i
+ ] = initial_micro_output[name]
+ else:
+ for i in range(1, self._local_number_of_sims):
+ initial_micro_output = self._micro_sims[i].initialize()
+ for name in self._adaptivity_micro_data_names:
+ self._data_for_adaptivity[name][
+ i
+ ] = initial_micro_output[name]
+ else:
+ warn(
+ "The initialize() method of the Micro simulation returns initial data, but adaptivity is turned off. The returned data will be ignored. The initialize method will nevertheless still be called."
+ )
+ if is_initial_data_required:
+ for i in range(1, self._local_number_of_sims):
+ self._micro_sims[i].initialize(initial_data[i])
+ else:
+ for i in range(1, self._local_number_of_sims):
+ self._micro_sims[i].initialize()
self._micro_sims_have_output = False
- if hasattr(micro_problem, 'output') and callable(getattr(micro_problem, 'output')):
+ if hasattr(micro_problem, "output") and callable(
+ getattr(micro_problem, "output")
+ ):
self._micro_sims_have_output = True
- self._dt = self._participant.get_max_time_step_size()
+ # ***************
+ # Private methods
+ # ***************
- def _read_data_from_precice(self) -> list:
+ def _read_data_from_precice(self, dt) -> list:
"""
Read data from preCICE.
+ Parameters
+ ----------
+ dt : float
+ Time step size at which data is to be read from preCICE.
+
Returns
-------
local_read_data : list
@@ -384,8 +589,13 @@ def _read_data_from_precice(self) -> list:
read_data[name] = []
for name in self._read_data_names.keys():
- read_data.update({name: self._participant.read_data(
- self._macro_mesh_name, name, self._mesh_vertex_ids, self._dt)})
+ read_data.update(
+ {
+ name: self._participant.read_data(
+ self._macro_mesh_name, name, self._mesh_vertex_ids, dt
+ )
+ }
+ )
if self._is_adaptivity_on:
if name in self._adaptivity_macro_data_names:
@@ -413,12 +623,18 @@ def _write_data_to_precice(self, data: list) -> None:
for dname in self._write_data_names.keys():
self._participant.write_data(
- self._macro_mesh_name, dname, self._mesh_vertex_ids, data_dict[dname])
+ self._macro_mesh_name,
+ dname,
+ self._mesh_vertex_ids,
+ data_dict[dname],
+ )
else:
for dname in self._write_data_names.keys():
- self._participant.write_data(self._macro_mesh_name, dname, [], np.array([]))
+ self._participant.write_data(
+ self._macro_mesh_name, dname, [], np.array([])
+ )
- def _solve_micro_simulations(self, micro_sims_input: list) -> list:
+ def _solve_micro_simulations(self, micro_sims_input: list, dt: float) -> list:
"""
Solve all micro simulations and assemble the micro simulations outputs in a list of dicts format.
@@ -427,6 +643,8 @@ def _solve_micro_simulations(self, micro_sims_input: list) -> list:
micro_sims_input : list
List of dicts in which keys are names of data and the values are the data which are required inputs to
solve a micro simulation.
+ dt : float
+ Time step size.
Returns
-------
@@ -437,20 +655,66 @@ def _solve_micro_simulations(self, micro_sims_input: list) -> list:
micro_sims_output = [None] * self._local_number_of_sims
for count, sim in enumerate(self._micro_sims):
- start_time = time.time()
- micro_sims_output[count] = sim.solve(micro_sims_input[count], self._dt)
- end_time = time.time()
-
- if self._is_micro_solve_time_required:
- micro_sims_output[count]["micro_sim_time"] = end_time - start_time
+ # If micro simulation has not crashed in a previous iteration, attempt to solve it
+ if not self._has_sim_crashed[count]:
+ # Attempt to solve the micro simulation
+ try:
+ start_time = time.time()
+ micro_sims_output[count] = sim.solve(micro_sims_input[count], dt)
+ end_time = time.time()
+                    # Write the solve time of the micro simulation if required and the simulation has not crashed
+ if self._is_micro_solve_time_required:
+ micro_sims_output[count]["micro_sim_time"] = (
+ end_time - start_time
+ )
+
+            # If the simulation crashes, log the error and mark the simulation as crashed
+ except Exception as error_message:
+ self._logger.error(
+ "Micro simulation at macro coordinates {} with input {} has experienced an error. "
+ "See next entry on this rank for error message.".format(
+ self._mesh_vertex_coords[count], micro_sims_input[count]
+ )
+ )
+ self._logger.error(error_message)
+ self._has_sim_crashed[count] = True
+
+ # If interpolate is off, terminate after crash
+ if not self._interpolate_crashed_sims:
+ crashed_sims_on_all_ranks = np.zeros(self._size, dtype=np.int64)
+ self._comm.Allgather(
+ np.sum(self._has_sim_crashed), crashed_sims_on_all_ranks
+ )
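+                # If any rank has a crashed simulation, all ranks exit together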
+ if sum(crashed_sims_on_all_ranks) > 0:
+ self._logger.info("Exiting simulation after micro simulation crash.")
+ sys.exit()
+
+ # Interpolate result for crashed simulation
+ unset_sims = [
+ count for count, value in enumerate(micro_sims_output) if value is None
+ ]
+
+ # Iterate over all crashed simulations to interpolate output
+ if self._interpolate_crashed_sims:
+ for unset_sim in unset_sims:
+ self._logger.info(
+ "Interpolating output for crashed simulation at macro vertex {}.".format(
+ self._mesh_vertex_coords[unset_sim]
+ )
+ )
+ micro_sims_output[unset_sim] = self._interpolate_output_for_crashed_sim(
+ micro_sims_input, micro_sims_output, unset_sim
+ )
return micro_sims_output
def _solve_micro_simulations_with_adaptivity(
- self,
- micro_sims_input: list,
- is_sim_active: np.ndarray,
- sim_is_associated_to: np.ndarray) -> list:
+ self,
+ micro_sims_input: list,
+ is_sim_active: np.ndarray,
+ sim_is_associated_to: np.ndarray,
+ dt: float,
+ ) -> list:
"""
Solve all micro simulations and assemble the micro simulations outputs in a list of dicts format.
@@ -463,6 +727,8 @@ def _solve_micro_simulations_with_adaptivity(
1D array having state (active or inactive) of each micro simulation
sim_is_associated_to : numpy array
1D array with values of associated simulations of inactive simulations. Active simulations have None
+ dt : float
+ Time step size.
Returns
-------
@@ -472,9 +738,22 @@ def _solve_micro_simulations_with_adaptivity(
"""
if self._adaptivity_type == "global":
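+            # Slice the global activity array down to the simulations owned by this rank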
active_sim_ids = np.where(
- is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1])[0]
+ is_sim_active[
+ self._global_ids_of_local_sims[0] : self._global_ids_of_local_sims[
+ -1
+ ]
+ + 1
+ ]
+ )[0]
inactive_sim_ids = np.where(
- is_sim_active[self._global_ids_of_local_sims[0]:self._global_ids_of_local_sims[-1] + 1] == False)[0]
+ is_sim_active[
+ self._global_ids_of_local_sims[0] : self._global_ids_of_local_sims[
+ -1
+ ]
+ + 1
+ ]
+ == False
+ )[0]
elif self._adaptivity_type == "local":
active_sim_ids = np.where(is_sim_active)[0]
inactive_sim_ids = np.where(is_sim_active == False)[0]
@@ -483,29 +762,83 @@ def _solve_micro_simulations_with_adaptivity(
# Solve all active micro simulations
for active_id in active_sim_ids:
- start_time = time.time()
- micro_sims_output[active_id] = self._micro_sims[active_id].solve(micro_sims_input[active_id], self._dt)
- end_time = time.time()
+ # If micro simulation has not crashed in a previous iteration, attempt to solve it
+ if not self._has_sim_crashed[active_id]:
+ # Attempt to solve the micro simulation
+ try:
+ start_time = time.time()
+ micro_sims_output[active_id] = self._micro_sims[active_id].solve(
+ micro_sims_input[active_id], dt
+ )
+ end_time = time.time()
+                    # Write the solve time of the micro simulation if required and the simulation has not crashed
+ if self._is_micro_solve_time_required:
+ micro_sims_output[active_id]["micro_sim_time"] = (
+ end_time - start_time
+ )
+
+ # Mark the micro sim as active for export
+ micro_sims_output[active_id]["active_state"] = 1
+ micro_sims_output[active_id][
+ "active_steps"
+ ] = self._micro_sims_active_steps[active_id]
+
+            # If the simulation crashes, log the error and mark the simulation as crashed
+ except Exception as error_message:
+ self._logger.error(
+ "Micro simulation at macro coordinates {} has experienced an error. "
+ "See next entry on this rank for error message.".format(
+ self._mesh_vertex_coords[active_id]
+ )
+ )
+ self._logger.error(error_message)
+ self._has_sim_crashed[active_id] = True
+
+ # If interpolate is off, terminate after crash
+ if not self._interpolate_crashed_sims:
+ crashed_sims_on_all_ranks = np.zeros(self._size, dtype=np.int64)
+ self._comm.Allgather(
+ np.sum(self._has_sim_crashed), crashed_sims_on_all_ranks
+ )
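+                    # If any rank has a crashed simulation, all ranks exit together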
+ if sum(crashed_sims_on_all_ranks) > 0:
+ self._logger.info("Exiting simulation after micro simulation crash.")
+ sys.exit()
+ # Interpolate result for crashed simulation
+ unset_sims = []
+ for active_id in active_sim_ids:
+ if micro_sims_output[active_id] is None:
+ unset_sims.append(active_id)
- # Mark the micro sim as active for export
- micro_sims_output[active_id]["active_state"] = 1
- micro_sims_output[active_id]["active_steps"] = self._micro_sims_active_steps[active_id]
+ # Iterate over all crashed simulations to interpolate output
+ if self._interpolate_crashed_sims:
+ for unset_sim in unset_sims:
+ self._logger.info(
+ "Interpolating output for crashed simulation at macro vertex {}.".format(
+ self._mesh_vertex_coords[unset_sim]
+ )
+ )
- if self._is_micro_solve_time_required:
- micro_sims_output[active_id]["micro_sim_time"] = end_time - start_time
+ micro_sims_output[unset_sim] = self._interpolate_output_for_crashed_sim(
+ micro_sims_input, micro_sims_output, unset_sim, active_sim_ids
+ )
# For each inactive simulation, copy data from most similar active simulation
if self._adaptivity_type == "global":
- self._adaptivity_controller.communicate_micro_output(is_sim_active, sim_is_associated_to, micro_sims_output)
+ self._adaptivity_controller.communicate_micro_output(
+ is_sim_active, sim_is_associated_to, micro_sims_output
+ )
elif self._adaptivity_type == "local":
for inactive_id in inactive_sim_ids:
micro_sims_output[inactive_id] = deepcopy(
- micro_sims_output[sim_is_associated_to[inactive_id]])
+ micro_sims_output[sim_is_associated_to[inactive_id]]
+ )
# Resolve micro sim output data for inactive simulations
for inactive_id in inactive_sim_ids:
micro_sims_output[inactive_id]["active_state"] = 0
- micro_sims_output[inactive_id]["active_steps"] = self._micro_sims_active_steps[inactive_id]
+ micro_sims_output[inactive_id][
+ "active_steps"
+ ] = self._micro_sims_active_steps[inactive_id]
if self._is_micro_solve_time_required:
micro_sims_output[inactive_id]["micro_sim_time"] = 0
@@ -517,23 +850,106 @@ def _solve_micro_simulations_with_adaptivity(
return micro_sims_output
+ def _interpolate_output_for_crashed_sim(
+ self,
+ micro_sims_input: list,
+ micro_sims_output: list,
+ unset_sim: int,
+ active_sim_ids: np.ndarray = None,
+ ) -> dict:
+ """
+ Using the output of neighboring simulations, interpolate the output for a crashed simulation.
-def main():
- parser = argparse.ArgumentParser(description='.')
- parser.add_argument(
- 'config_file',
- type=str,
- help='Path to the JSON config file of the manager.')
-
- args = parser.parse_args()
- config_file_path = args.config_file
- if not os.path.isabs(config_file_path):
- config_file_path = os.getcwd() + "/" + config_file_path
-
- manager = MicroManager(config_file_path)
-
- manager.solve()
-
+ Parameters
+ ----------
+ micro_sims_input : list
+ List of dicts in which keys are names of data and the values are the data which are required inputs to
+ solve a micro simulation.
+ micro_sims_output : list
+        List of dicts containing the output of local micro simulations.
+ unset_sim : int
+        Index of the crashed simulation (whose output is being interpolated) in the list of all local simulations.
+ active_sim_ids : numpy.ndarray, optional
+ Array of active simulation IDs.
-if __name__ == "__main__":
- main()
+ Returns
+ -------
+ output_interpol : dict
+ Result of the interpolation in which keys are names of data and the values are the data.
+ """
+ # Find neighbors of the crashed simulation in active and non-crashed simulations
+        # Restrict the neighbor search to active simulations when adaptivity is on
+        if self._is_adaptivity_on:
+            candidate_ids = active_sim_ids
+        else:
+            candidate_ids = range(len(micro_sims_input))
+ micro_sims_active_input_lists = []
+ micro_sims_active_values = []
+        # Turn the macro parameters of the crashed simulation into a list to use as the interpolation coordinate
+ crashed_position = []
+ for value in micro_sims_input[unset_sim].values():
+ if isinstance(value, np.ndarray) or isinstance(value, list):
+ crashed_position.extend(value)
+ else:
+ crashed_position.append(value)
+        # Turn the macro parameters of active simulations into lists to use as interpolation coordinates
+        for i in candidate_ids:
+ if not self._has_sim_crashed[i]:
+ # Collect macro data at one macro vertex
+ intermediate_list = []
+ for value in micro_sims_input[i].values():
+ if isinstance(value, np.ndarray) or isinstance(value, list):
+ intermediate_list.extend(value)
+ else:
+ intermediate_list.append(value)
+ # Create lists of macro data for interpolation
+ micro_sims_active_input_lists.append(intermediate_list)
+ micro_sims_active_values.append(micro_sims_output[i].copy())
+ # Find nearest neighbors
+ if len(micro_sims_active_input_lists) == 0:
+ self._logger.error(
+ "No active neighbors available for interpolation at macro vertex {}. Value cannot be interpolated".format(
+ self._mesh_vertex_coords[unset_sim]
+ )
+ )
+ return None
+ else:
+ nearest_neighbors = self._interpolant.get_nearest_neighbor_indices(
+ micro_sims_active_input_lists,
+ crashed_position,
+ self._number_of_nearest_neighbors,
+ )
+ # Interpolate
+ interpol_space = []
+ interpol_values = []
+ # Collect neighbor vertices for interpolation
+        for neighbor in nearest_neighbors:
+            interpol_space.append(micro_sims_active_input_lists[neighbor].copy())
+            interpol_values.append(micro_sims_active_values[neighbor].copy())
+            # Remove data not required for interpolation from the values
+            interpol_values[-1].pop("micro_sim_time", None)
+            if self._is_adaptivity_on:
+                interpol_values[-1].pop("active_state", None)
+                interpol_values[-1].pop("active_steps", None)
+
+ # Interpolate for each parameter
+ output_interpol = dict()
+ for key in interpol_values[0].keys():
+            # Collect the values of the current parameter from all neighboring simulations
+            key_values = [values[key] for values in interpol_values]
+ output_interpol[key] = self._interpolant.interpolate(
+ interpol_space, crashed_position, key_values
+ )
+ # Reintroduce removed information
+ if self._is_micro_solve_time_required:
+ output_interpol["micro_sim_time"] = 0
+ if self._is_adaptivity_on:
+ output_interpol["active_state"] = 1
+ output_interpol["active_steps"] = self._micro_sims_active_steps[unset_sim]
+ return output_interpol
diff --git a/micro_manager/micro_manager_base.py b/micro_manager/micro_manager_base.py
new file mode 100644
index 00000000..9e8d54b4
--- /dev/null
+++ b/micro_manager/micro_manager_base.py
@@ -0,0 +1,103 @@
+#!/usr/bin/env python3
+"""
+The Micro Manager abstract base class provides an interface for its subclasses.
+The Micro Manager base class handles initialization shared by its subclasses (MicroManagerCoupling).
+The base class should not be executed on its own. It is meant to be inherited by MicroManagerCoupling.
+
+For more details see the MicroManagerCoupling class or the documentation at https://precice.org/tooling-micro-manager-overview.html.
+"""
+
+from mpi4py import MPI
+import logging
+from abc import ABC, abstractmethod
+
+from .config import Config
+
+
+class MicroManagerInterface(ABC):
+ """
+ Abstract base class of Micro Manager classes. Defines interface for Micro Manager classes.
+ """
+
+ @abstractmethod
+ def initialize(self):
+ """
+ Initialize micro simulations.
+ """
+ pass
+
+ @abstractmethod
+ def solve(self):
+ """
+ Solve micro simulations.
+ """
+ pass
+
+
+class MicroManager(MicroManagerInterface):
+ """
+ Micro Manager base class provides common functionalities for its subclasses.
+ """
+
+ def __init__(self, config_file):
+ """
+ Constructor. Initializes member variables and logger shared between all subclasses.
+
+ Parameters
+ ----------
+ config_file : string
+ Name of the JSON configuration file (provided by the user).
+ """
+ self._comm = MPI.COMM_WORLD
+ self._rank = self._comm.Get_rank()
+ self._size = self._comm.Get_size()
+
+ self._logger = logging.getLogger(__name__)
+ self._logger.setLevel(level=logging.INFO)
+
+ # Create file handler which logs messages
+ fh = logging.FileHandler("micro-manager.log")
+ fh.setLevel(logging.INFO)
+
+ # Create formatter and add it to handlers
+ formatter = logging.Formatter(
+ "[" + str(self._rank) + "] %(name)s - %(levelname)s - %(message)s"
+ )
+ fh.setFormatter(formatter)
+ self._logger.addHandler(fh) # add the handlers to the logger
+
+ self._is_parallel = self._size > 1
+ self._micro_sims_have_output = False
+
+ self._local_number_of_sims = 0
+ self._global_number_of_sims = 0
+ self._is_rank_empty = False
+
+ self._logger.info("Provided configuration file: {}".format(config_file))
+ self._config = Config(self._logger, config_file)
+
+        # Names of the data to be written, either to preCICE or to the snapshot database
+ self._write_data_names = self._config.get_write_data_names()
+
+        # Names of the data to be read as input parameters to the micro simulations
+ self._read_data_names = self._config.get_read_data_names()
+
+ self._micro_dt = self._config.get_micro_dt()
+
+ self._is_micro_solve_time_required = self._config.write_micro_solve_time()
+
+ def initialize(self):
+ """
+        Initialize micro simulations. Not implemented in the base class.
+ """
+ raise NotImplementedError(
+ "Initialization of micro simulations is not implemented in base class"
+ )
+
+ def solve(self):
+ """
+        Solve micro simulations. Not implemented in the base class.
+ """
+ raise NotImplementedError(
+ "Solving micro simulations is not implemented in base class"
+ )
diff --git a/micro_manager/micro_simulation.py b/micro_manager/micro_simulation.py
index 902eaba1..dad1c872 100644
--- a/micro_manager/micro_simulation.py
+++ b/micro_manager/micro_simulation.py
@@ -19,6 +19,7 @@ def create_simulation_class(micro_simulation_class):
Simulation : class
Definition of class Simulation defined in this function.
"""
+
class Simulation(micro_simulation_class):
def __init__(self, global_id):
micro_simulation_class.__init__(self, global_id)
diff --git a/micro_manager/snapshot/__init__.py b/micro_manager/snapshot/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/micro_manager/snapshot/dataset.py b/micro_manager/snapshot/dataset.py
new file mode 100644
index 00000000..36172e88
--- /dev/null
+++ b/micro_manager/snapshot/dataset.py
@@ -0,0 +1,230 @@
+from importlib import metadata
+import os
+from datetime import datetime
+
+import numpy as np
+
+try:
+ import h5py
+except ImportError:
+ raise ImportError(
+ "The Micro Manager snapshot computation requires the h5py package."
+ )
+
+
+class ReadWriteHDF:
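+    """
+    Read macro parameters from and write snapshot data to HDF5 files.
+    """
+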
+ def __init__(self, logger) -> None:
+ self._logger = logger
+ self._has_datasets = False
+
+ def create_file(self, file_path: str) -> None:
+ """
+ Create an HDF5 file for a given file name and path.
+
+ Parameters
+ ----------
+ file_path : str
+            Path of the file to create, including the file name.
+
+ """
+ f = h5py.File(file_path, "w")
+ f.attrs["status"] = "writing"
+ f.attrs["MicroManager_version"] = str(metadata.version("micro-manager-precice"))
+ f.attrs["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ f.close()
+
+ def collect_output_files(
+ self, dir_name: str, file_list: list, database_length: int
+ ) -> None:
+ """
+ Iterate over a list of HDF5 files in a given directory and copy the content into a single file.
+ The files are deleted after the content is copied.
+
+ Parameters
+ ----------
+ dir_name : str
+ Path to directory containing the files.
+ file_list : list
+ List of files to be combined.
+        database_length : int
+ Global number of snapshots.
+ """
+        # Create an output file
+ main_file = h5py.File(os.path.join(dir_name, "snapshot_data.hdf5"), "w")
+ main_file.attrs["status"] = "writing"
+ main_file.attrs["MicroManager_version"] = str(
+ metadata.version("micro-manager-precice")
+ )
+ main_file.attrs["date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+ # Create datasets in output file
+ with h5py.File(os.path.join(dir_name, file_list[0]), "r") as parameter_file:
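+            # Dataset shapes are taken from the first file; every rank is expected to write the same data names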
+ for key in parameter_file.keys():
+ if not key == "crashed_snapshots":
+ current_data = parameter_file[key][0]
+ main_file.create_dataset(
+ key,
+ shape=(database_length, *current_data.shape),
+ chunks=(1, *current_data.shape),
+ fillvalue=np.nan,
+ )
+ # Loop over files
+ crashed_snapshots = []
+ outer_position = 0
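+        # outer_position is the global snapshot index at which the next file's data is inserted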
+ for file in file_list:
+ parameter_file = h5py.File(os.path.join(dir_name, file), "r")
+ # Add all data sets to the main file.
+ for key in parameter_file.keys():
+ inner_position = outer_position
+ for chunk in parameter_file[key].iter_chunks():
+ current_data = parameter_file[key][chunk]
+ # If the key is "crashed_snapshots" add the indices to the list of crashed snapshots
+ # Otherwise write the data to the main file
+ if key == "crashed_snapshots":
+ crashed_snapshots.extend(
+ inner_position + parameter_file[key][:]
+ )
+ else:
+ main_file[key][inner_position] = current_data
+ inner_position += 1
+ outer_position = inner_position
+ parameter_file.close()
+ os.remove(os.path.join(dir_name, file))
+
+ # Write the indices of crashed snapshots to the main file
+ if len(crashed_snapshots) > 0:
+ main_file.create_dataset(
+ "crashed_snapshots", data=crashed_snapshots, dtype=int
+ )
+ main_file.attrs["status"] = "finished"
+ main_file.close()
+
+ def write_output_to_hdf(
+ self,
+ file_path: str,
+ macro_data: dict,
+ micro_data: dict | None,
+ idx: int,
+ length: int,
+ ) -> None:
+ """
+        Write the output of a micro simulation to an HDF5 file.
+
+ Parameters
+ ----------
+ file_path : str
+ Path to file in which the data should be written.
+ macro_data : dict
+ Dict of macro simulation input.
+ micro_data : dict | None
+ Dict of micro simulation output. If None, only the macro data is written.
+ idx: int
+ Local index of the current snapshot.
+ length : int
+ Local number of snapshots.
+ """
+ parameter_file = h5py.File(file_path, "a")
+ if micro_data is None:
+ input_data = macro_data
+ else:
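+            # Merge macro input and micro output into one dict (dict union requires Python 3.9 or newer)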
+ input_data = macro_data | micro_data
+
+ # If the datasets are not created yet, create them
+ if not self._has_datasets:
+ for key in input_data.keys():
+ current_data = np.asarray(input_data[key])
+ parameter_file.create_dataset(
+ key,
+ shape=(length, *current_data.shape),
+ chunks=(1, *current_data.shape),
+ fillvalue=np.nan,
+ )
+ self._has_datasets = True
+
+ # Iterate over macro and micro data sets and write current simulation data to the file
+ for key in input_data.keys():
+ current_data = np.asarray(input_data[key])
+ parameter_file[key][idx] = current_data
+
+ parameter_file.close()
+
+ def read_hdf(self, file_path: str, data_names: dict, start: int, end: int) -> list:
+ """
+ Read data from an HDF5 file and return it as a list of dictionaries.
+
+ Parameters
+ ----------
+ file_path : str
+ Path of file to read data from.
+ data_names : dict
+ Names of parameters to read from the file.
+        start: int
+            Index of the first snapshot to read on this process.
+        end: int
+            Index after the last snapshot to read on this process (exclusive).
+
+ Returns
+ -------
+ output: list
+ List of dicts where the keys are the names of the parameters and the values the corresponding data.
+ """
+
+ parameter_file = h5py.File(file_path, "r")
+ parameter_data = dict()
+ output = []
+ # Read data by iterating over the relevant datasets
+ for key in data_names.keys():
+ parameter_data[key] = np.asarray(parameter_file[key][start:end])
+            my_key = key  # Remember one key to iterate over the data length below
+        # Iterate over the number of snapshots. In each iteration, write the data of all
+        # datasets to a dictionary and append it to the output list of dicts.
+ for i in range(len(parameter_data[my_key])):
+ current_data = dict()
+ for key in data_names.keys():
+ current_data[key] = parameter_data[key][i]
+ output.append(current_data)
+ return output
+
+ def get_parameter_space_size(self, file_path: str) -> int:
+ """
+ Get the length of the parameter space from the HDF5 file.
+
+ Parameters
+ ----------
+ file_path : str
+ Path of file to read data from.
+
+ Returns
+ -------
+ int
+            Size of the parameter space.
+ """
+ with h5py.File(file_path, "r") as file:
+ return file[list(file.keys())[0]].len()
+
+ def write_crashed_snapshots(self, file_path: str, crashed_input: list):
+ """
+ Write indices of crashed snapshots to the HDF5 database.
+
+ Parameters
+ ----------
+ file_path : str
+            Path of the file to write the crashed snapshot indices to.
+        crashed_input : list
+            List of indices of crashed simulations.
+ """
+ with h5py.File(file_path, "a") as file:
+ file.create_dataset("crashed_snapshots", data=crashed_input, dtype=int)
+
+ def set_status(self, file_path: str, status: str):
+ """
+        Set the status attribute of the file, for example to indicate whether it is still being written to.
+
+        Parameters
+        ----------
+        file_path : str
+            Path of the file whose status attribute is set.
+        status : str
+            Status to set, e.g. "writing" or "finished".
+ """
+ with h5py.File(file_path, "a") as file:
+ file.attrs["status"] = status
diff --git a/micro_manager/snapshot/snapshot.py b/micro_manager/snapshot/snapshot.py
new file mode 100644
index 00000000..3e2be751
--- /dev/null
+++ b/micro_manager/snapshot/snapshot.py
@@ -0,0 +1,282 @@
+#!/usr/bin/env python3
+"""
+Snapshot Computation is a tool within the Micro Manager to initialize micro simulations and create a snapshot database of their outputs by running them with a set of prescribed input parameters.
+This file contains the class MicroManagerSnapshot, which has the following callable public methods:
+
+- solve
+- initialize
+
+Detailed documentation: https://precice.org/tooling-micro-manager-overview.html
+"""
+
+import importlib
+import os
+import sys
+import time
+
+import numpy as np
+
+from micro_manager.micro_manager import MicroManager
+from .dataset import ReadWriteHDF
+from micro_manager.micro_simulation import create_simulation_class
+
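+# Make the micro simulation and post-processing scripts importable from the current working directory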
+sys.path.append(os.getcwd())
+
+
+class MicroManagerSnapshot(MicroManager):
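+    """
+    Compute micro simulation snapshots for a set of prescribed macro parameters and store them in an HDF5 database.
+    """
+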
+ def __init__(self, config_file: str) -> None:
+ """
+ Constructor.
+
+ Parameters
+ ----------
+ config_file : string
+ Name of the JSON configuration file (provided by the user).
+ """
+ super().__init__(config_file)
+ self._config.read_json_snapshot()
+
+ # Path to the parameter file containing input parameters for micro simulations
+ self._parameter_file = self._config.get_parameter_file_name()
+        # Get name of the post-processing script
+ self._post_processing_file_name = self._config.get_postprocessing_file_name()
+
+ # Check if simulation object can be re-used.
+ self._initialize_once = self._config.create_single_sim_object()
+ # Collect crashed indices
+ self._crashed_snapshots = [] # Declaration
+
+ # **************
+ # Public methods
+ # **************
+
+ def solve(self) -> None:
+ """
+        Solve the problem by iterating over a set of macro parameters.
+ - Create micro simulation object.
+ - Post-process micro output.
+ - Write output to database.
+ - Merge output in parallel run.
+ """
+
+ # Loop over all macro parameters
+ for elems in range(self._local_number_of_sims):
+            # Initialize the micro simulation
+ if elems == 0:
+ self._micro_sims = create_simulation_class(self._micro_problem)(
+ self._global_ids_of_local_sims[0]
+ )
+ else:
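+                # Create a new simulation object for each parameter set unless a single object is re-used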
+ if not self._initialize_once:
+ self._micro_sims = create_simulation_class(self._micro_problem)(
+ self._global_ids_of_local_sims[elems]
+ )
+
+ micro_sims_input = self._macro_parameters[elems]
+ # Solve micro simulation
+ micro_sims_output = self._solve_micro_simulation(micro_sims_input)
+
+ # Write output to file
+ if micro_sims_output is not None:
+ # Post-processing
+ if self._post_processing_file_name is not None:
+ # Attempt importing post-processing script
+ try:
+ post_processing = getattr(
+ importlib.import_module(
+ self._post_processing_file_name, "Postprocessing"
+ ),
+ "Postprocessing",
+ )
+ if hasattr(post_processing, "postprocessing") and callable(
+ getattr(post_processing, "postprocessing")
+ ):
+ micro_sims_output = post_processing.postprocessing(
+ micro_sims_output
+ )
+ else:
+ self._logger.info(
+ "No post-processing script with the provided path found. Skipping post-processing."
+ )
+ self._post_processing_file_name = None
+ except Exception:
+ self._logger.info(
+ "No post-processing script with the provided path found. Skipping post-processing."
+ )
+ self._post_processing_file_name = None
+ self._data_storage.write_output_to_hdf(
+ self._output_file_path,
+ micro_sims_input,
+ micro_sims_output,
+ elems,
+ len(self._macro_parameters),
+ )
+            # If the simulation has crashed, log it and write only the macro data to the database
+ else:
+ self._logger.info("Skipping snapshot storage for crashed simulation.")
+ self._data_storage.write_output_to_hdf(
+ self._output_file_path,
+ micro_sims_input,
+ None,
+ elems,
+ len(self._macro_parameters),
+ )
+
+ self._crashed_snapshots.append(elems)
+
+        # Write the indices of crashed snapshots to the database
+ if len(self._crashed_snapshots) > 0:
+ self._data_storage.write_crashed_snapshots(
+ self._output_file_path, self._crashed_snapshots
+ )
+ self._data_storage.set_status(self._output_file_path, "none")
+
+ # Merge output files
+ if self._is_parallel:
+ self._logger.info(
+ "Snapshots have been computed and stored. Merging output files"
+ )
+ self._data_storage.set_status(self._output_file_path, "reading/deleting")
+ list_of_output_files = self._comm.gather(self._file_name, 0)
+ if self._rank == 0:
+ self._data_storage.collect_output_files(
+ self._output_subdirectory,
+ list_of_output_files,
+ self._parameter_space_size,
+ )
+ self._logger.info("Snapshot computation completed.")
+
+ def initialize(self) -> None:
+ """
+ Initialize the Snapshot Computation by performing the following tasks:
+ - Distribute the parameter data equally if the snapshot creation is executed in parallel.
+        - Read macro parameters from the parameter file.
+ - Create output subdirectory and file paths to store output.
+ - Import micro simulation.
+ """
+
+ # Create subdirectory to store output files in
+ directory = os.path.dirname(self._parameter_file)
+ self._output_subdirectory = os.path.join(directory, "output")
+ os.makedirs(self._output_subdirectory, exist_ok=True)
+
+ # Create object responsible for reading parameters and writing simulation output
+ self._data_storage = ReadWriteHDF(self._logger)
+
+ self._parameter_space_size = self._data_storage.get_parameter_space_size(
+ self._parameter_file
+ )
+ # Read macro parameters from the parameter file
+ # Decompose parameters if the snapshot creation is executed in parallel
+ if self._is_parallel:
+            equal_partition = self._parameter_space_size // self._size
+ rest = self._parameter_space_size % self._size
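+            # The first 'rest' ranks each process one additional parameter set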
+ if self._rank < rest:
+ start = self._rank * (equal_partition + 1)
+ end = start + equal_partition + 1
+ else:
+ start = self._rank * equal_partition + rest
+ end = start + equal_partition
+ self._macro_parameters = self._data_storage.read_hdf(
+ self._parameter_file, self._read_data_names, start, end
+ )
+ else:
+ self._macro_parameters = self._data_storage.read_hdf(
+ self._parameter_file,
+ self._read_data_names,
+ 0,
+ self._parameter_space_size,
+ )
+
+ # Create database file to store output from a rank in
+ if self._is_parallel:
+ self._file_name = "snapshot_data_{}.hdf5".format(self._rank)
+ else:
+ self._file_name = "snapshot_data.hdf5"
+ self._output_file_path = os.path.join(
+ self._output_subdirectory, self._file_name
+ )
+ self._data_storage.create_file(self._output_file_path)
+ self._logger.info("Output file created: {}".format(self._output_file_path))
+ self._local_number_of_sims = len(self._macro_parameters)
+ self._logger.info(
+ "Number of local micro simulations = {}".format(self._local_number_of_sims)
+ )
+
+ if self._local_number_of_sims == 0:
+ if self._is_parallel:
+ self._logger.info(
+ "Rank {} has no micro simulations and hence will not do any computation.".format(
+ self._rank
+ )
+ )
+ self._is_rank_empty = True
+ else:
+ raise Exception("Snapshot has no micro simulations.")
+
+ nms_all_ranks = np.zeros(self._size, dtype=np.int64)
+ # Gather number of micro simulations that each rank has, because this rank needs to know how many micro
+ # simulations have been created by previous ranks, so that it can set
+ # the correct global IDs
+ self._comm.Allgatherv(np.array(self._local_number_of_sims), nms_all_ranks)
+
+ # Get global number of micro simulations
+ self._global_number_of_sims = np.sum(nms_all_ranks)
+
+ # Create lists of local and global IDs
+ sim_id = np.sum(nms_all_ranks[: self._rank])
+ self._global_ids_of_local_sims = [] # DECLARATION
+ for i in range(self._local_number_of_sims):
+ self._global_ids_of_local_sims.append(sim_id)
+ sim_id += 1
+ self._micro_problem = getattr(
+ importlib.import_module(
+ self._config.get_micro_file_name(), "MicroSimulation"
+ ),
+ "MicroSimulation",
+ )
+
+ self._micro_sims_have_output = False
+ if hasattr(self._micro_problem, "output") and callable(
+ getattr(self._micro_problem, "output")
+ ):
+ self._micro_sims_have_output = True
+
+ # ***************
+ # Private methods
+ # ***************
+
+ def _solve_micro_simulation(self, micro_sims_input: dict) -> dict | None:
+ """
+ Solve a single micro simulation.
+
+ Parameters
+ ----------
+ micro_sims_input : dict
+ Keys are names of data and the values are the data which are required inputs to
+ solve a micro simulation.
+
+ Returns
+ -------
+ micro_sims_output : dict | None
+ Dict in which keys are names of data and the values are the data of the output of the micro
+ simulations. The return type is None if the simulation has crashed.
+ """
+ try:
+ start_time = time.time()
+ micro_sims_output = self._micro_sims.solve(micro_sims_input, self._micro_dt)
+ end_time = time.time()
+
+ if self._is_micro_solve_time_required:
+ micro_sims_output["micro_sim_time"] = end_time - start_time
+
+ return micro_sims_output
+ # Handle simulation crash
+ except Exception as e:
+ self._logger.error(
+ "Micro simulation with input {} has crashed. See next entry on this rank for error message".format(
+ micro_sims_input
+ )
+ )
+ self._logger.error(e)
+ return None
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..cee97d2a
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,47 @@
+[build-system]
+requires = ["setuptools>=41", "wheel", "setuptools-git-versioning"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name="micro-manager-precice"
+dynamic = [ "version" ]
+dependencies = [
+ "pyprecice>=3.1", "numpy", "mpi4py"
+]
+requires-python = ">=3.8"
+authors = [
+ { name = "The preCICE Developers", email="info@precice.org"}
+]
+maintainers = [
+ { name = "Ishaan Desai", email="ishaan.desai@ipvs.uni-stuttgart.de"}
+]
+description="A tool which facilitates two-scale macro-micro coupled simulations using preCICE."
+readme = "README.md"
+license={ text = "GPLv3" }
+keywords = [ "preCICE", "multiscale", "coupling" ]
+classifiers=[
+"Development Status :: 4 - Beta",
+"License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
+"Programming Language :: Python :: 3 :: Only",
+"Programming Language :: Python :: 3.8",
+"Topic :: Scientific/Engineering",
+]
+
+[project.optional-dependencies]
+snapshot = ["h5py"]
+sklearn = ["scikit-learn"]
+
+[project.urls]
+Homepage = "https://precice.org"
+Documentation = "https://precice.org/tooling-micro-manager-overview.html"
+Repository = "https://github.com/precice/micro-manager"
+"Bug Tracker" = "https://github.com/precice/micro-manager/issues"
+
+[project.scripts]
+micro-manager-precice = "micro_manager:main"
+
+[tool.setuptools]
+packages=["micro_manager", "micro_manager.adaptivity", "micro_manager.snapshot"]
+
+[tool.setuptools-git-versioning]
+enabled = true
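+# The package version is derived from git tags by setuptools-git-versioning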
diff --git a/setup.py b/setup.py
index 9d2f7032..60684932 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,3 @@
-import os
-from setuptools import setup, find_packages
+from setuptools import setup
-# from https://stackoverflow.com/a/9079062
-import sys
-if sys.version_info[0] < 3:
- raise Exception("micromanager only supports Python3. Did you run $python setup.py