LaMEM
Documentation for LaMEM.
One way to generate an input geometry is to use the built-in geometrical objects in LaMEM. These geometries are used if the option
msetup = geom
is specified in the input file.
A number of geometric primitives exist within LaMEM and can be combined, with the ones further down in the input script overwriting earlier ones. This allows you, for example, to first set a halfspace cooling temperature for the lithosphere and afterwards define the crust, mantle lithosphere, etc.
The following objects are available:
Specifying a sphere comes with the following options:
<SphereStart>
  phase       = 1
  radius      = 1.5
  center      = 1.0 2.0 3.0

  Temperature = constant # optional: temperature of the sphere. possibilities: [constant]
  cstTemp     = 1000     # required in case of [constant]: temperature value [in Celsius in case of GEO units]
<SphereEnd>
Allows you to specify a horizontal layer in the model:
<LayerStart>
  phase       = 1
  top         = 5.0
  bottom      = 3.0

  # optional: sinusoidal perturbations
  cosine      = 0    # optional: add a cosine perturbation on top of the interface (if 1)
  wavelength  = 1    # required if cosine: wavelength in x-direction
  amplitude   = 0.1  # required if cosine: amplitude of perturbation

  # optional: temperature structure
  Temperature = halfspace # optional: temperature structure. possibilities: [constant, linear, halfspace]
  cstTemp     = 1000      # required in case of [constant]: temperature value [in Celsius in case of GEO units]
  topTemp     = 0         # required in case of [linear,halfspace]: temperature @ top [in Celsius in case of GEO units]
  botTemp     = 1300      # required in case of [linear,halfspace]: temperature @ bottom [in Celsius in case of GEO units]
  thermalAge  = 70        # required in case of [halfspace]: thermal age of lithosphere [in Myrs if GEO units are used]
<LayerEnd>
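For reference, the cosine option presumably perturbs the flat interface as (a sketch of the intended shape; the exact implementation is in the LaMEM source):

$z_{interface}(x) = top + amplitude \cdot \cos\left( \frac{2\pi x}{wavelength} \right)$

with top, amplitude and wavelength the parameters set above.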
Allows you to define a rectangular box:
<BoxStart>
  phase       = 1           # required: phase of box
  bounds      = 0 1 0 1 0 1 # required: left right front back bottom top

  Temperature = linear # optional: temperature structure. possibilities: [constant, linear, halfspace]
  cstTemp     = 1000   # required in case of [constant]: temperature value [in Celsius in case of GEO units]
  topTemp     = 0      # required in case of [linear,halfspace]: temperature @ top [in Celsius in case of GEO units]
  botTemp     = 1300   # required in case of [linear,halfspace]: temperature @ bottom [in Celsius in case of GEO units]
  thermalAge  = 70     # required in case of [halfspace]: thermal age of lithosphere [in Myrs if GEO units are used]
<BoxEnd>
Allows you to define a hexahedral object, which requires you to specify 8 corner points in the following order:
<HexStart>
  phase = 1 # required: phase of the hexahedron

  # required: coordinates of the 8 corner points
  coord = 0.75 0.75 0.75 0.9 0.75 0.75 0.9 0.9 0.75 0.75 0.9 0.75 0.75 0.75 0.9 0.9 0.75 0.9 0.9 0.9 0.9 0.75 0.9 0.9
<HexEnd>
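Reading the coord line of this example in (x,y,z) triplets makes the ordering explicit: the first four points (all with z = 0.75) define the bottom face, and the last four (z = 0.9) the top face:

# bottom face: (0.75,0.75,0.75) (0.9,0.75,0.75) (0.9,0.9,0.75) (0.75,0.9,0.75)
# top face:    (0.75,0.75,0.9)  (0.9,0.75,0.9)  (0.9,0.9,0.9)  (0.75,0.9,0.9)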
Allows you to insert a cylinder-like object:
<CylinderStart>
  phase       = 1           # required: phase of the cylinder
  radius      = 0.3         # required: radius of cylinder
  bottom      = 0.1         # required: z-coordinate of the bottom of the cylinder
  base        = 0.1 0.1 0.1 # required: (x,y,z)-coordinate of point at base of cylinder
  cap         = 0.1 0.1 0.8 # required: (x,y,z)-coordinate of point at cap of cylinder

  Temperature = constant # optional: temperature of the cylinder. possibilities: [constant]
  cstTemp     = 1000     # required in case of [constant]: temperature value [in Celsius in case of GEO units]
<CylinderEnd>
Allows you to define an ellipsoid:
<EllipsoidStart>
  phase       = 1
  axes        = 2.0 1.5 1.0 # semi-axes of ellipsoid in x, y and z
  center      = 1.0 2.0 3.0

  Temperature = constant # optional: temperature of the ellipsoid. possibilities: [constant]
  cstTemp     = 1000     # required in case of [constant]: temperature value [in Celsius in case of GEO units]
<EllipsoidEnd>
One of the advantages of this way of creating a LaMEM input file is that it sets the input geometry in the same file as all other options. An additional advantage is that you don't have to recreate the input geometry of the model if you change the resolution or the number of particles per cell.
One of the disadvantages is that it is sometimes more tedious to create the geometry, in particular if you have complicated setups in which you employ data from other sources (say, a Moho map).
For these reasons, additional methods exist to generate input geometries:
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, sex characteristics, gender identity and expression, level of experience, education, socio-economic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
Examples of behavior that contributes to creating a positive environment include:
Examples of unacceptable behavior by participants include:
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the LaMEM project team.
All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
This Code of Conduct is adapted from the Contributor Covenant (https://www.contributor-covenant.org), version 1.4, available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
For answers to common questions about this code of conduct, see https://www.contributor-covenant.org/faq
If you are only a user of LaMEM, any text editor is sufficient to change the *.dat LaMEM input files. Yet, if you are planning to do some more serious code development, it is quite helpful to install a more professional debugging environment. There is quite some discussion within the development team about what the best environment is. Boris and Tobias really like Microsoft Visual Studio Code, whereas Anton swears by Eclipse. The advantage of VS Code is that it is easy to set up and get running, but it does not allow parallel debugging. Eclipse, on the other hand, does allow for parallel debugging but can be more tricky to get up and running with PETSc.
Microsoft Visual Studio Code is a recent and open-source development by Microsoft which has quickly become the number one development environment among professional programmers. It runs on Linux, Mac and Windows and provides a very simple way to get debugging to work with PETSc. It also allows you to do development on a remote system almost as if it is on your local machine, which is pretty cool.
Here I will assume that PETSc, MPICH and LaMEM are installed in
/opt/mpich3/include
/opt/petsc/petsc-3.18.6-deb/include/
/local/home/boris/LaMEM/
which you obviously have to update for your system.
If you want to do remote debugging on a Linux machine, you should first install the Remote SSH plugin and log in to the remote server (this obviously requires you to have ssh login data for that machine). On the remote machine, go to the /LaMEM/src directory.
The first thing to do is to add a file called c_cpp_properties.json inside the (hidden) directory /local/home/boris/LaMEM/.vscode/ with the following content:
{
  "configurations": [
    {
      "name": "MacBook arm64",
      "includePath": [
        "${workspaceFolder}/**",
        "/opt/homebrew/include",
        "/opt/petsc/petsc-3.18.6-deb/include/"
      ]
    }
  ],
  "version": 4
}
This will tell the editor where PETSc is installed, and will give you info about all the PETSc routines within LaMEM if you hover over a PETSc command such as CHKERRQ with your cursor.
If you want to debug as well, you need to make sure that you have a working debugger installed (for example the GNU debugger gdb, or on the newer arm64 Apple systems the lldb debugger). On an arm64 Apple system you would need to install the C/C++ extension as well as the CodeLLDB extension.
Once that is the case, you need to create a file called launch.json in the same hidden directory /local/home/boris/LaMEM/.vscode/:
{
  // Use IntelliSense to learn about possible attributes.
  // Hover to view descriptions of existing attributes.
  // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
  "version": "0.2.0",
  "configurations": [
    {
      "name": "(lldb) Launch",
      "type": "lldb",
      "request": "launch",
      "program": "/Users/kausb/WORK/LaMEM/LaMEM/bin/deb/LaMEM",
      "args": ["-ParamFile","FallingBlock_IterativeSolver.dat","-nstep_max","2"],
      "stopOnEntry": false,
      "cwd": "/Users/kausb/WORK/LaMEM/LaMEM/input_models/BuildInSetups/",
      "env": {"PETSC_DEB": "/opt/petsc/petsc-3.18.6-deb",
              "PATH": "/opt/homebrew/bin:${env:PATH}"},
      "preLaunchTask": "C/C++: build LaMEM deb file",
    },
  ]
}
For this to work, we also need to create a task that runs make mode=deb all in /LaMEM/src before we start the debugger, to rebuild the code. For this, you need to create a file tasks.json in .vscode:
{
  "tasks": [
    {
      "type": "cppbuild",
      "label": "C/C++: build LaMEM deb file",
      "command": "make",
      "args": [
        "mode=deb", "all"
      ],
      "options": {
        "cwd": "/Users/kausb/WORK/LaMEM/LaMEM/src",
        "env": {"PETSC_DEB": "/opt/petsc/petsc-3.18.6-deb/",
                "PATH": "/opt/homebrew/bin:${env:PATH}"}
      },
      "problemMatcher": [
        "$gcc"
      ],
      "group": {
        "kind": "build",
        "isDefault": true
      },
      "detail": "Task generated by Debugger."
    },
  ],
  "version": "2.0.0"
}
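To verify that this build task works before wiring it into the debugger, you can run the same command manually in a terminal (using the example paths from above):

$ cd /Users/kausb/WORK/LaMEM/LaMEM/src
$ PETSC_DEB=/opt/petsc/petsc-3.18.6-deb make mode=deb all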
After this, you should be able to run LaMEM with the debugger, and even step inside PETSc routines.
The Eclipse development environment does allow you to debug in parallel. We used to use this a lot, but please be aware that it is a bit tricky to set up. If you want more info about it, please have a look at LaMEM/doc.
This is a worked-out example of how to create a 2D subduction setup with temperature dependent rheology.
The example input file can be found under:
/input_models/SubductionWithParticles/Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
whereas the MATLAB/Octave file is here:
/input_models/SubductionWithParticles/CreateMarkers_Subduction_Tdependent_FreeSurface_parallel.m
We start with having a look at the input file. With Visual Studio Code this can be done with
$ code Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
***Units*** At the beginning of the script, we specify the units:
#===============================================================================
# Scaling
#===============================================================================

  units            = geo # geological units

  unit_temperature = 1000
  unit_length      = 1e3
  unit_viscosity   = 1e20
  unit_stress      = 1e9
Internally, LaMEM computes everything in non-dimensional units in order to prevent round-off (numerical) errors. In order to transfer quantities from dimensional to non-dimensional units you need to specify these values. LaMEM has 3 modes for units:
units = none
units = SI
units = geo
As you can imagine, the first implies that all input is in non-dimensional units, and the second that everything is given in SI units. The third, and the one that is most typically used, employs geodynamically sensible units, such as temperature in Celsius, length scales in kilometers, times in millions of years, etc.
The units that you define here are in SI units, which implies that the typical length scale is 1000 meters, and the typical stress is $10^9$ Pa.
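As a concrete worked example of this scaling (a sketch of the principle, not of the exact internal bookkeeping): with unit_length = 1e3 m, the bottom coordinate of the model domain specified below, $-660$ km, is first expressed in SI units as $-6.6\times10^{5}$ m and then divided by the characteristic length of $10^{3}$ m, giving a non-dimensional value of $-660$.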
***Model domain*** The model domain and numerical resolution are specified here:
#===============================================================================
# Grid & discretization parameters
#===============================================================================

# Number of cells for all segments
  nel_x = 256
  nel_y = 2
  nel_z = 64

# Coordinates of all segments (including start and end points)
  coord_x = -1500 1500
  coord_y = -10 10
  coord_z = -660 40
Note that LaMEM is a fully 3D code; there is no pure-2D part. You can nevertheless do 2D simulations by selecting only 1 or 2 cells in the y-direction, as done above. If you select multigrid as a solver, you need to have at least 2 cells here. Note that for 2D, the 'thin' direction should always be the y-direction.
The remaining part of this block defines the number of elements in each direction and the left/right, front/back and bottom/top coordinates (in km, as we use geo units).
Also note that you can always specify parameters on the command-line, that will overrule whatever is written in the input file. For example:
../../bin/LaMEM -ParamFile Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat -nel_x 64 -nel_z 16
will perform a simulation with 64 by 2 by 16 elements instead.
Here, we list a number of worked-out examples that will teach you various aspects of using LaMEM. Note that many of the examples with built-in geometry have already been discussed under Getting Started.
Here we will discuss various aspects of using LaMEM.
5.1 Boundary conditions
In many cases, generating a model setup from within the LaMEM input file using built-in geometries is insufficient, and we would like to have more control over what we are doing and create more complicated setups.
One way to do this is to use a MATLAB/Octave script to generate the model setup. We have provided example files in /input_models/SubductionWithParticles/, as well as in some of the test directories.
Depending on whether your LaMEM simulation will run in parallel or not, there are two different approaches:

1. Create the full setup on one processor. For this, the MATLAB scripts should be placed in the same directory as the LaMEM (*.dat) input script.
2. Create a parallel partitioning file, which allows creating a parallel input file. With this method, every processor of a parallel LaMEM simulation only reads in the piece of the input geometry it needs. This method is scalable to very high resolutions, but requires you to run LaMEM first to create the partitioning file.
If you start with a new setup/project, we recommend that you first generate the input file on one processor, and look at it using Paraview (approach 1). Once you are happy with it, you can proceed with approach 2.
Let's have a look at how these two approaches work, using the input files in /input_models/SubductionWithParticles as an example.
The files in this directory create a setup for a subduction model with free slip and no temperature (first case) or with temperature-dependent properties (second example).
The file CreateMarkers_Subduction_Linear_FreeSlip_parallel.m gives an example of how we can generate an input file on one processor.
At the beginning we need to set the directories such that MATLAB/Octave knows where the required MATLAB scripts are:
addpath('../../matlab')
This may have to be changed if you are in a different directory.
To generate the setup on one processor, please make sure that the following flag is set to zero:
LaMEM_Parallel_output = 0;
You also need to specify the LaMEM input file with:
LaMEM_input_file = 'Subduction2D_FreeSlip_MATLABParticles_Linear_DirectSolver.dat';
Once this is done, it reads the LaMEM input file to figure out the size of the computational domain, the number of elements employed in every direction as well as the number of particles in every direction. It also generates a 3D grid, which is all done with this function:
[Grid,X,Y,Z,npart_x,npart_y,npart_z,W,L,H] = LaMEM_ParseInputFile(LaMEM_input_file);
You can use the X,Y,Z grid later to create your input geometry. Within the MATLAB script, the following 3D arrays are created:
Phase : contains the phase or rock-type at every point in the grid
Temp  : contains the temperature in degrees Celsius at every point in the grid
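How you fill these two arrays is up to you; a minimal MATLAB sketch (with a hypothetical region and illustrative values, not the ones used in this example) would be:

% Phase and Temp have the same size as the marker grid X,Y,Z
Phase = zeros(size(X));      % everything is phase 0 (e.g., mantle) to start with
Temp  = 1350*ones(size(X));  % constant background temperature [Celsius]

ind        = find(Z > -100); % hypothetical: all markers above 100 km depth
Phase(ind) = 1;              % assign them a different phase
Temp(ind)  = 500;            % and a different temperature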
After reading the dimensions of the grid, we use various ways to create the setup.
In this specific example, we actually generate a polygon and use the command inpolygon to figure out which of the 3D points are located within the polygon, to specify where the slab is. At the end of the file, we save the markers to disk with
FDSTAGSaveMarkersParallelMatlab(A,Parallel_partition);
which will put the marker files in the directory /markers. Within the LaMEM input file, you indicate that this directory should be used with
msetup         = files
mark_load_file = ./markers/mdb # marker input file (extension is .xxxxxxxx.dat)
Moreover, we also create a 3D output file VTK_ModelSetup_paraview_binary.vtr that we can open with Paraview:
FDSTAGWriteMatlab2VTK(A,'BINARY'); % default option
Doing that is useful as it allows you to build the setup without having to run the LaMEM model.
Once you are happy, you can run LaMEM on one processor with
../../bin/opt/LaMEM -ParamFile Subduction2D_FreeSlip_Particles_Linear_DirectSolver.dat
Interested in how to do the same on >1 processor? Keep on reading...
Generating parallel input files involves 3 steps:

1. Create a processor partitioning file by running LaMEM with the command-line option -mode save_grid.
2. Set LaMEM_Parallel_output = 1 in the MATLAB script, point it to the partitioning file, and run it to create the parallel marker files.
3. Run the LaMEM simulation in parallel, using the markers you just created.

In the following we will describe the 3 steps with an example. We will assume that you are happy with generating the setup on one processor (see above). The MATLAB example file we will be using is
/input_models/SubductionWithParticles/CreateMarkers_Subduction_Tdependent_FreeSurface_parallel.m
Make sure that you are in the correct directory:
$ cd /input_models/SubductionWithParticles
$ ls
CreateMarkers_Subduction_Linear_FreeSlip_parallel.m         Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
CreateMarkers_Subduction_Tdependent_FreeSurface_parallel.m  Subduction3D_FreeSlip_MATLABParticles_Linear_Multigrid.dat
Makefile                                                    Tests
README                                                      p_poly_dist.m
Subduction2D_FreeSlip_Particles_Linear_DirectSolver.dat
We will use the file Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat as the LaMEM file.
First, run this file with LaMEM, but with the added command-line parameter -mode save_grid:
$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat -mode save_grid
This will generate a processor partitioning file as follows:
$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat -mode save_grid
--------------------------------------------------------------------------
                Lithosphere and Mantle Evolution Model
     Compiled: Date: Oct 25 2020 - Time: 14:02:58
--------------------------------------------------------------------------
        STAGGERED-GRID FINITE DIFFERENCE CANONICAL IMPLEMENTATION
--------------------------------------------------------------------------
Parsing input file : Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
   Adding PETSc option: -snes_ksp_ew
   Adding PETSc option: -js_ksp_monitor
Finished parsing input file : Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
--------------------------------------------------------------------------
Scaling parameters:
   Temperature : 1000. [C/K]
   Length      : 1000. [m]
   Viscosity   : 1e+20 [Pa*s]
   Stress      : 1e+09 [Pa]
--------------------------------------------------------------------------
Grid parameters:
   Total number of cpu                  : 4
   Processor grid  [nx, ny, nz]         : [4, 1, 1]
   Fine grid cells [nx, ny, nz]         : [256, 2, 64]
   Number of cells                      : 32768
   Number of faces                      : 115328
   Maximum cell aspect ratio            : 1.17188
   Lower coordinate bounds [bx, by, bz] : [-1500., -10., -660.]
   Upper coordinate bounds [ex, ey, ez] : [1500., 10., 40.]
--------------------------------------------------------------------------
Saving processor partitioning ... done (0.000728846 sec)
--------------------------------------------------------------------------
If you look again at the content of the directory, you will see that a new file is generated:
$ ls
CreateMarkers_Subduction_Linear_FreeSlip_parallel.m         Subduction2D_FreeSlip_Particles_Linear_DirectSolver.dat
CreateMarkers_Subduction_Tdependent_FreeSurface_parallel.m  Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
Makefile                                                    Subduction3D_FreeSlip_MATLABParticles_Linear_Multigrid.dat
ProcessorPartitioning_4cpu_4.1.1.bin                        Tests
README                                                      p_poly_dist.m
ProcessorPartitioning_4cpu_4.1.1.bin is the processor partitioning file, which we need to indicate in the MATLAB/Octave script. If you generate the file on a different number of processors, or use different box sizes, the name of the partitioning file will be different. Note that on Linux, you can always get info about the date when a file was added with:
$ ls -al Proc*
-rw-r--r--  1 kausb  admin  2668 Oct 26 15:56 ProcessorPartitioning_4cpu_4.1.1.bin
This will allow you to see which partitioning file was added most recently.
Next, open the file CreateMarkers_Subduction_Tdependent_FreeSurface_parallel.m and make sure that the following parameters are activated/set:
LaMEM_Parallel_output = 1;
LaMEM_input_file      = 'Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat';
Parallel_partition    = 'ProcessorPartitioning_4cpu_4.1.1.bin'
Once you run this file in MATLAB or Octave, it should give the following output:
>> CreateMarkers_Subduction_Tdependent_FreeSurface_parallel
Creating setup in parallel using LaMEM file: Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat

Parallel_partition =

    'ProcessorPartitioning_4cpu_4.1.1.bin'

Writing Appended data for binary output in Paraview ...
Writing file -> ./markers/mdb.00000000.dat
Writing file -> ./markers/mdb.00000001.dat
Writing file -> ./markers/mdb.00000002.dat
Writing file -> ./markers/mdb.00000003.dat
which shows that it created marker files for 4 processors.
If you open the file VTK_ModelSetup_paraview_binary.vtr in Paraview, it should look like:
This is a subduction setup as in the example before, but this time with a halfspace cooling thermal age.
Once you successfully created the marker files, you can run the LaMEM simulation in parallel with:
mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile Subduction2D_FreeSurface_MATLABParticles_Nonlinear_DirectSolver.dat
Please remember that you need to re-generate the partitioning file if you change the number of processors, the grid resolution, or the size of the computational domain. If you only change the internal geometry or, say, the temperature of the slab, you do not need to redo the partitioning file. Simply rerunning the MATLAB script is ok in that case.
The MATLAB/Octave input scripts consist of several parts.
In the beginning you specify the filenames, whether the output should be parallel or not, and (if applicable) the name of the partitioning file (all in red):
In the next part, the user can do whatever is needed to set the initial geometry. In this particular example, we create a 2D array that computes the distance of a point to the top of the slab. We also create a 2D polygon that describes the slab but is set to NaN outside.
The following part first defines the Phase and Temp 3D arrays, and sets the phases based on the distance towards the top of the slab. We use a halfspace cooling temperature profile with a thermal age of 50 Myrs in this case.
In other scripts you will always have to define and set the properties of these two 3D arrays in one way or another (how you do it is up to you).
Finally, we save the data to LaMEM marker files and write a VTK output file:
This last part is the same for all scripts and should not be modified.
Now that PETSc is installed and LaMEM compiled, it is time to do your first simulation. We will look at the sinking of a few high-density spheres in a lower density fluid, followed by a few examples of more complicated setups.
First, switch to the correct directory:
$ cd /input_models/BuildInSetups
after which you can run the simulation with the optimized version of LaMEM as follows:
$ mpiexec -n 1 ../../bin/opt/LaMEM -ParamFile FallingSpheres_Multigrid.dat
The model output should look like this, and finish with:
After the simulation, you will find two new *.pvd files in the directory, and two additional directories that contain info about the timesteps:
Spheres_multigrid.pvd
Spheres_multigrid_phase.pvd
Timestep_00000000_0.00000000e+00
Timestep_00000001_1.10000000e+01
Running the simulation in parallel is done with:
$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile FallingSpheres_Multigrid.dat
LaMEM produces VTK files as output. We usually use Paraview to visualize those. Open Paraview and load the file Spheres_multigrid.pvd.
The result looks like this. In order to produce this picture, we used the "Slice", "Glyph" and "Contour" tools (everything you need is circled in red).
Let's change a few input parameters and have a look at the LaMEM input file. Open FallingSpheres_Multigrid.dat with a text editor and have a look at it.
Changing how many timesteps pass between output files is done with the parameter nstep_out. There are two ways to set this:

1. Change the following line in the input file:
nstep_out = 19 # save output every n steps
2. Add -nstep_out 19 to the command-line and run the simulation with:
$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile FallingSpheres_Multigrid.dat -nstep_out 19
In fact, most input parameters in the LaMEM input file can be set from the command-line as well.
Note that simulations run either until the required number of timesteps (nstep_max) is reached, or until the computed time becomes larger than time_end.
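For reference, an illustrative time-stepping block that combines these parameters (the values are hypothetical; the parameter names are the ones used above):

#===============================================================================
# Time stepping parameters
#===============================================================================

  time_end  = 100 # simulation end time
  nstep_max = 20  # maximum number of timesteps
  nstep_out = 1   # save output every n steps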
After the simulation finishes, you can copy the timesteps and the *.pvd files to your local computer and reload the Spheres_multigrid.pvd file within Paraview (File -> Reload Files). Animate all timesteps with the green play button. At the end of the simulation, it'll look like:
Hint on using Paraview: If you create a visualization in Paraview, you can save it as a "State file", using File -> Save State. For the next simulation, you can load this with File -> Load State and choose the directory where the input files are. That saves quite a bit of work and allows you to make reproducible figures.
Hint on visualizing LaMEM simulations: You don't have to wait until a LaMEM simulation is finished. The *.pvd files are continuously updated, so make sure to copy over these files as well as all timestep directories and hit "Reload".
Next, let's change the viscosity of the spheres. For this, scroll towards the end of the input file until the following section:
#===============================================================================
# Material phase parameters
#===============================================================================

  # Define properties of matrix
  <MaterialStart>
    ID  = 0 # phase id
    rho = 1 # density
    eta = 1 # viscosity
  <MaterialEnd>

  # Define properties of spheres
  <MaterialStart>
    ID  = 1    # phase id
    rho = 2    # density
    eta = 1000 # viscosity
  <MaterialEnd>
In this simulation, the matrix has phase ID=0 and the spheres have ID=1. The matrix has viscosity and density both equal to 1, whereas the spheres have a higher viscosity (1000) and higher density (2). You can change the viscosity of the spheres by changing the corresponding numbers in the input file and rerunning the simulation.
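For example, to make the spheres ten times less viscous (an illustrative value), you would only change the eta entry of the second material block:

  # Define properties of spheres
  <MaterialStart>
    ID  = 1   # phase id
    rho = 2   # density
    eta = 100 # viscosity (was 1000)
  <MaterialEnd>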
Note: This particular simulation is performed in non-dimensional units, as units = none in the parameter file. LaMEM can also use units = si or units = geo, where the last one is most convenient when performing geodynamic simulations (as length scales are in km, and timescales in Myrs). Later in the tutorial you will learn more about this.
If you are a new user of LaMEM, the best way for you to get familiar with the code is to do a few exercises. The ones below will also give you a bit of info about LaMEM along the way. All exercises employ the built-in geometry options of LaMEM (implying that the input model geometry is constructed with geometrical objects that are specified in the input file). All input files we will discuss are in the directory /input_models/BuildInSetups.
Run the Falling Block direct solver test from the built-in setups (/BuildInSetups/FallingBlock_DirectSolver.dat) and inspect the input file with a text editor to get a feeling for the parameters that are specified there.
This example uses a so-called direct solver, which is the recommended method for 2D setups as it is more robust in cases with large viscosity contrasts. Yet, it is too slow for large 3D simulations, which require "multigrid" solvers (which unfortunately don't always work very well with large viscosity contrasts and require somewhat more tuning and expertise). PETSc itself does not have built-in parallel direct solvers, but you can install PETSc with SUPERLU_DIST and MUMPS, two packages that do provide them. LaMEM automatically uses those if they are available.
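One possible way to add these packages is to pass the corresponding download flags when configuring PETSc (a sketch; the exact configure line depends on your system and PETSc version, see the installation instructions):

$ ./configure --download-superlu_dist --download-mumps --download-scalapack [your other configure options]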
Once the simulation is finished, it should give a setup that is fairly similar to the falling spheres discussed above, but with a square block.
Change the input file (and save it under a new name, to not confuse Git later) to include the following features:
Try to reproduce this; the result should look like this:
Note that in this visualization, the blocks look a bit rounded, as the resolution of the simulation was quite low.
The direct solver we used so far is fine for low resolutions but not for higher ones. For those, you will want to use a multigrid solver instead. In LaMEM, this can be done by specifying solver = multigrid in the input file. Run the Falling Block multigrid test by running /BuildInSetups/FallingBlock_Multigrid.dat.
The section in the input file that deals with this is:
#===============================================================================
# Solver options
#===============================================================================
  SolverType     = multigrid # solver [direct or multigrid]
  MGLevels       = 4         # number of MG levels [default=3]
  MGSweeps       = 10        # number of MG smoothening steps per level [default=10]
  MGSmoother     = chebyshev # type of smoothener used [chebyshev or jacobi]
  MGCoarseSolver = mumps     # coarse grid solver [direct/mumps/superlu_dist or redundant - more options specifiable through the command-line options -crs_ksp_type & -crs_pc_type]
LaMEM will then print the following statements:
Preconditioner parameters:
   Matrix type                   : monolithic
   Preconditioner type           : coupled Galerkin geometric multigrid
   Global coarse grid [nx,ny,nz] : [4, 4, 4]
   Local coarse grid  [nx,ny,nz] : [4, 2, 2]
   Number of multigrid levels    : 4
What this states is that we use 4 levels of multigrid. Our finest resolution is given by nel_x, nel_y, nel_z at the beginning of the file (32,32,32 in this example). The next coarser grid will be (16,16,16), followed by (8,8,8), and the coarsest resolution is (4,4,4). Multigrid solves the governing equations on these different grids, and uses a direct solver (e.g., MUMPS) on the coarsest grid. Unfortunately, it is slightly more tricky to set up and use, and you will have to experiment a bit with the number of smoothing steps used at every level and the number of multigrid levels that you employ.
What is important in typical geodynamic simulations is that the coarse grid should be able to "feel" the viscosity structure, so having an extremely coarse grid doesn't work all that well. If your coarse grid is too large (has too many cells), on the other hand, the coarse grid solution will start dominating the computational time, which is also not what you want. Experimenting with this is thus important for real setups.
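As the MGCoarseSolver comment above indicates, the coarse grid solver can also be controlled through PETSc-style command-line options; an illustrative invocation (the option values are examples) would be:

$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile FallingBlock_Multigrid.dat -crs_ksp_type preonly -crs_pc_type lu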
Hint: You can add the command-line option -log_view to get a detailed overview of your simulation and the time spent on each of the levels. This will be shown at the end of the simulation. If you wish, you can run the simulation for only a few timesteps by adding the command-line option -nstep_max 5.
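Combining both options, a quick profiling run (illustrative) then looks like:

$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile FallingBlock_Multigrid.dat -nstep_max 5 -log_view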
The previous exercises were all performed with a non-dimensional setup. Yet, in most geoscience applications it is useful to have your input in units of kilometers, degrees Celsius, stresses in MPa, etc. For this reason, LaMEM has the geo input units. Let's do a subduction simulation to have a look at this, using a 2D example. As the current version of LaMEM is 3D-only, we simulate 2D cases by having 2 elements in the y-direction.
For this, run the dimensional subduction test setup that uses built-in geometries, which is called /BuildInSetups/Subduction2D_FreeSlip_Direct.dat. This simulation is a simple viscous subduction setup, with a free-slip upper boundary and a plastic crust (such that the plate detaches from the top boundary). The simulation will take a bit longer than some of the previous simulations, but look approximately like this:
The file Subduction2D_FreeSurface_DirectSolver.dat is an example of a 2D subduction model with a free surface. Note that in that case, a Paraview file is also created that shows the internal free surface (open the file Subduction2D_FreeSurface_direct_surf.pvd to see that). The result should look like:
Note that we used "Threshold" in Paraview to remove the air layer from the simulation.
As a next step, we perform a simulation of a 2D rift that forms an asymmetric fault zone/core complex. The setup consists of a lower crust, an upper crust and a sticky air layer to simulate the free surface (represented by an internal free surface).
In this case we have a slightly more complicated setup and use a multigrid solver in a 2D setting. In order to run this example, use the file /BuildInSetups/Rifting2D_MultigridSolver.dat.
New compared to previous cases is that we:
Employ elasticity (by specifying an elastic shear modulus in the input file)
Strain soften the friction angle
Depending on the viscosity of the lower crust, you can either get a symmetric or an asymmetric rift. The default simulation will look like this after 25 timesteps:
For this simulation, it is handy to have a somewhat larger computer that you can use. The input script is called Subduction3D_DoubleSubduction_FreeSlip_Multigrid.dat.
LaMEM (Lithosphere and Mantle Evolution Model) is a software package to simulate 2D/3D geological and geomechanical processes, which runs on anything from your laptop to a massively parallel machine. It takes (poro)-visco-elasto-plastic rheologies into account, and can be used to simulate anything from the collision of tectonic plates to the flow of fluids through porous rocks.
The purpose of this wiki is to get you started with installing and running LaMEM and give a few worked-out examples that explain how to run LaMEM on your local machine or cluster.
LaMEM contains a number of features, specifically tailored to simulate geological processes and complex geometries:
2D/3D parallel thermomechanical code for Cartesian geometries
Built from the onset to run on MPI-parallel machines; the largest we tested had 458'752 processors
Support for both direct solvers and multigrid solvers
Marker-and-cell approach to simulate complex geometries and large strains
Newton solvers for nonlinear iterations
Multiple ways to create model geometries: (1) built-in geometrical objects, (2) MATLAB/Octave input files, (3) GeomIO support to create 2D/3D input geometries from vector graphics, (4) voxel-based input (to compute effective permeabilities of porous rocks)
Mechanical solver for visco-elasto-plastic rheologies, for both (thermo)-elastic bulk compressible and incompressible cases
Nonlinear combined rock creep laws and (regularized) non-associated plasticity
Internal free surface and sticky-air approach
Energy solver with shear heating
Fluid pressure and Darcy solver for groundwater flow
Phase transitions, taking (multiple) precomputed phase diagrams into account
Partial melting
Simplified erosion/sedimentation algorithms
Breakpointing/restarting options
Adjoint formulations to perform inversions and derive scaling laws
We recommend that you start with reading the installation instructions.
The user guide consists of Markdown pages, which are compiled into webpages using the julia package Documenter.jl. The pages are listed in the /docs directory of this repository. You can extend it by adding new pages to the repository, which can be added to the side menu by modifying make.jl. The documentation is automatically recompiled when you push changes to the repository.
There are several ways in which you can define the initial geometry of your model setup:
The built-in geometries and MATLAB/Octave files allow you to specify the initial temperature structure. In addition, you can also independently set a temperature profile with:
In some cases you might be interested in starting with a different temperature profile, such as a steady-state temperature profile for your setup. See:
By default, the internal free surface is set to a constant value. In some cases that is insufficient, and you can then also set the initial topography with a file:
There are several ways in which you can define the initial geometry of your model setup. Click on the blue titles to get more extended info:
3.1 Built-in geometries This is for simpler geometries and can directly be set from the LaMEM *.dat file. The advantage is that it will work for any resolution, so if this is possible in your case, go for it!
3.2 Julia based setup More complicated setups can be constructed in julia by using the GeophysicalModelGenerator package. This is the recommended way to do this.
3.3 Matlab/Octave geometries Previously, we used MATLAB scripts to create model setups. We keep this here for historical reasons (and because the geomIO workflow has not yet been fully ported to julia).
The built-in geometries and MATLAB/Octave files allow you to specify the initial temperature setup as well. In addition, you can also, independently, set a temperature profile with:
3.5 Temperature input file The initial thermal structure can be set in julia or the MATLAB scripts (as described above), but also separately if you want, as described here.
3.6 Temperature diffusion In some cases you might be interested in starting with a different temperature profile, such as a steady-state temperature profile for your setup, as described here.
3.7 Topography input file By default, the internal free surface is set to a constant elevation. Instead, you can also set the initial topography with a file, as explained here.
The absolute simplest way to get LaMEM working on your system is to download pre-compiled libraries which are available for over 100 architectures and systems, including essentially all systems in use at the moment. You can install it through the julia package manager, and you can run it either through julia or through the terminal (after setting the correct path):
julia> ]
pkg> add LaMEM
pkg> test LaMEM
More details are given here. This will work fine on your local machine or server (including in parallel). Yet, if you are planning to use LaMEM on large parallel HPC clusters you (or your system administrator) may still need to compile PETSc.
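After installation, you can also run a simulation directly from julia; a minimal sketch, assuming the run_lamem helper provided by the LaMEM.jl package and an input file in the current directory:

julia> using LaMEM
julia> run_lamem("FallingSpheres_Multigrid.dat", 1) # ParamFile, number of cores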
LaMEM is built on top of PETSc, which provides great support for parallel solvers and grid infrastructure. Unlike other codes used in geodynamics, LaMEM does not require an excessive number of additional packages, beyond the ones that can be downloaded through the PETSc installation system.
This installation guide was created based on initial input from Giovanni Mainetti and Andrea Bistacchi (University of Milano Bicocca), with input from the Mainz team (Andrea Picollo, Boris Kaus).
The LaMEM development team uses different approaches internally, and over the years many aspects of installing PETSc and using LaMEM have become easier. Yet if you ask us now (October 2020) what we recommend for a new user, it would be the following:
We have tested LaMEM on Linux, Mac and Windows 10. The development team uses Mac and Linux, so these machines are best supported. As Windows 10 now has a (still experimental) bash shell (called WSL), you can install PETSc within this shell by following the Linux installation instructions.
The most complicated step in getting LaMEM running is to install the correct version of PETSc, on your laptop or cluster. Below we give more specific info if you want to do it yourself on Mac or Linux. Yet, an alternative and newer method to install PETSc and all required compilers on a new (linux/mac) machine or even on a complicated cluster is spack. It installs everything required and consistently with the same compilers in a separate directory and works quite well in our experience (including installing additional packages). A spack tutorial can be found here.
Brief instructions: You can install spack in your home directory with:
$ git clone https://github.com/spack/spack.git ~/spack
$ cd ~/spack
And add environmental variables:
$ . share/spack/setup-env.sh
Find the compilers you have
$ spack compilers
Get info about the PETSc package that you can install:
$ spack info petsc
Install PETSc with the correct packages, and leave out stuff we don't need. The optimized compilation of PETSc is installed with
$ spack install petsc@3.18.6 +mumps +suite-sparse -hypre -hdf5 -shared -debug
If that works out, you'll have to update your environmental variables and create the PETSC_OPT variable:
$ . share/spack/setup-env.sh
$ export PETSC_OPT=$(spack location -i petsc)
You will have to redo the same for a debug version of PETSc to have the full compilation up and running.
The alternative method is to install PETSc yourself. That is a bit more effort, but it also enables you to install packages (like Pastix) that are not available in the spack distribution. Below we give installation instructions for Mac and Linux. On Windows, use the WSL and follow the Linux instructions.
On Mac, you will need to ensure that Xcode and the Xcode command line tools are installed. It is also a good idea to install compilers and MPI using a package manager. One possibility is to install MacPorts, and install compilers with:
$ sudo port install gcc7
and make it the default with
$ sudo port select --set gcc gcc7
Next, install the MPI library MPICH with:
$ sudo port install mpich-gcc7-fortran
and make it the default with
$ sudo port select --set mpi mpich-gcc7-fortran
You may easily have several other compilers installed on your Mac. For a correct installation of PETSc, you will need to ensure that all compilers point to the correct version (in the example above, gcc7 installed from MacPorts). Test this with
$ mpif90 --version
$ mpicc --version
These instructions have been tested with Ubuntu. Make sure that your packages are installed correctly and are up to date, with:
$ sudo apt update
If needed, update all outdated packages with
$ sudo apt dist-upgrade
Make sure that both python 3 and python 2.7 are installed, for example by typing
$ which python
If it is not installed, you can install it on Linux with:
sudo apt install python2.7 python-pip
or on Mac, for example, by installing the anaconda package. Alternatively, you can install the MacPorts package manager and install it with
sudo port install python27
PETSc will need fortran and C compilers. Which fortran compiler you use is not all that important, so you are welcome to use gcc and gfortran. Once you are a more experienced LaMEM user and do production runs, you might want to try different options to see if this speeds up the simulations. In addition to the compilers, it is a good idea to install git and cmake as well.
On linux this can be done with
$ sudo apt update
$ sudo apt install gfortran gcc git cmake
and on Mac, using macports
$ sudo port selfupdate
$ sudo port install gfortran gcc git cmake
The most important package for LaMEM is PETSc. If you just want to give LaMEM a try, the most basic installation is sufficient. Once you do production runs, it is worthwhile to experiment a bit with more optimized solver options. Installing PETSc with those does not always work, but PETSc has a very responsive user list, which is searchable, and where you can post your questions if needed. As PETSc regularly changes its syntax, LaMEM is always only compatible with a particular version of PETSc. This is typically updated once per year.
The current version of LaMEM is compatible with PETSc 3.18.6
You can download the PETSc version you need here. Do that and unzip it with
$ tar -xvf petsc-3.18.6.tar.gz
Change to the PETSc directory from the command window, for example with:
$ cd ~/Software/PETSc/petsc-3.18.6
and specify the PETSC environmental variable:
$ export PETSC_DIR=$PWD
The simplest installation of PETSc can be configured as follows (assuming you are in the PETSc directory). This will automatically download and install the MPI library as well, together with a few other packages we will use.
$ ./config/configure.py --prefix=/opt/petsc/petsc-3.18.6-opt --download-mpich=1 --download-superlu_dist=1 --download-mumps=1 --download-scalapack=1 --download-fblaslapack=1 --with-debugging=0 --FOPTFLAGS=-O3 --CXXOPTFLAGS=-O3 --COPTFLAGS=-O3 --with-shared-libraries=0 --download-cmake
This will install an optimized (fast) version of PETSc on your system in the directory /opt/petsc/petsc-3.18.6-opt. You can change this directory, obviously, but in that case please remember where you put it, as we need it later.
If you want to have more control over PETSc and use the MPI version that you installed earlier on your system, using the package manager (see above), you can install it as:
$ ./config/configure.py --prefix=/opt/petsc/petsc-3.18.6-opt --download-superlu_dist=1 --doCleanup=1 --download-mumps=1 --download-suitesparse=1 --download-scalapack=1 --download-fblaslapack=1 --FOPTFLAGS=-O3 --CXXOPTFLAGS=-O3 --COPTFLAGS=-O3 --with-shared-libraries=0 --download-cmake --with-debugging=0 --with-mpi-include=/opt/local/include/mpich-gcc7/ --with-cc=/opt/local/bin/mpicc --with-cxx=/opt/local/bin/mpicxx --with-fc=/opt/local/bin/mpif90 --with-mpi-lib=/opt/local/lib/mpich-gcc7/libmpi.a
Note that the above lines assume that MPI is installed under the directory /opt/local/bin/. You can check that this is the case for you as well by typing
$ which mpiexec
which should give you the directory /opt/local/bin/mpiexec. If it gives you a different directory, you will have to use that directory in the PETSc configuration listed above. Both methods discussed above will install the parallel direct solvers MUMPS and SUPERLU_DIST. LaMEM will also work without these parallel solvers, but we find them particularly useful for 2D simulations and as coarse grid solvers.
After the configuration step has finished successfully (which will take some time), PETSc will print a summary of the configuration and the suggested make command.
Next, make PETSc with:
$ make PETSC_DIR=/Users/kausb/Software/PETSC/petsc-3.18.6 PETSC_ARCH=arch-darwin-c-opt all
After that, you will be asked to install PETSc with:
sudo make PETSC_DIR=/Users/kausb/Software/PETSC/petsc-3.18.6 PETSC_ARCH=arch-darwin-c-opt install
and test whether the installation works with
$ make PETSC_DIR=/opt/petsc/petsc-3.18.6-opt PETSC_ARCH="" check
This will run a few test cases and if all is well, will tell you so.
If you only run simulations with LaMEM, the optimized version of PETSc described above will be sufficient. Yet, if you also develop routines and have to do debugging, it is a good idea to also install the debug version:
$ ./config/configure.py --prefix=/opt/petsc/petsc-3.18.6-deb --download-superlu_dist=1 --doCleanup=1 --download-mumps=1 --download-suitesparse=1 --download-scalapack=1 --download-fblaslapack=1 --FOPTFLAGS="-O0 -g" --CXXOPTFLAGS="-O0 -g" --COPTFLAGS="-O0 -g" --with-shared-libraries=0 --download-cmake --with-debugging=1 --with-mpi-include=/opt/local/include/mpich-gcc7/ --with-cc=/opt/local/bin/mpicc --with-cxx=/opt/local/bin/mpicxx --with-fc=/opt/local/bin/mpif90 --with-mpi-lib=/opt/local/lib/mpich-gcc7/libmpi.a
Compared to before, we have three changes, namely:
--prefix=/opt/petsc/petsc-3.18.6-deb
--with-debugging=1
--FOPTFLAGS="-O0 -g" --CXXOPTFLAGS="-O0 -g" --COPTFLAGS="-O0 -g"
With this you can repeat the procedure above. Just for completeness, the simple configure option from above in debug mode would thus be:
$ ./config/configure.py --prefix=/opt/petsc/petsc-3.18.6-deb --download-mpich=1 --download-superlu_dist=1 --download-mumps=1 --download-scalapack=1 --download-fblaslapack=1 --download-cmake --with-debugging=1 --FOPTFLAGS="-O0 -g" --CXXOPTFLAGS="-O0 -g" --COPTFLAGS="-O0 -g" --with-shared-libraries=0
Chances are that you want to install PETSc on a cluster. The main point to take into account is that you need to link it against the appropriate MPI compilers.
If you are lucky, a previous version of PETSc already exists on the cluster and you want to reinstall it in your home directory while adding some new packages such as SUPERLU_DIST or MUMPS. In that case, there is a simple trick to find out the exact options that were used to compile PETSc on the cluster: compile and run one of the examples (e.g. ex1 in the PETSc directory under /src/ksp/ksp/examples/tutorials) with the command-line option -log_view. At the end of the output, PETSc lists the configure options that were used, for example:
Configure options: --prefix=/cluster/easybuild/broadwell/software/numlib/PETSc/3.18.6-intel-2018.02-downloaded-deps --with-mkl_pardiso=1 --with-mkl_pardiso-dir=/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl --with-hdf5=1 --with-hdf5-dir=/cluster/easybuild/broadwell/software/data/HDF5/1.8.20-intel-2018.02 --with-large-io=1 --with-c++-support=1 --with-debugging=0 --download-hypre=1 --download-triangle=1 --download-ptscotch=1 --download-pastix=1 --download-ml=1 --download-superlu=1 --download-metis=1 --download-superlu_dist=1 --download-prometheus=1 --download-mumps=1 --download-parmetis=1 --download-suitesparse=1 --download-hypre-shared=0 --download-metis-shared=0 --download-ml-shared=0 --download-mumps-shared=0 --download-parmetis-shared=0 --download-pastix-shared=0 --download-prometheus-shared=0 --download-ptscotch-shared=0 --download-suitesparse-shared=0 --download-superlu-shared=0 --download-superlu_dist-shared=0 --with-cc=mpiicc --with-cxx=mpiicpc --with-c++-support --with-fc=mpiifort --CFLAGS="-O3 -xCORE-AVX2 -ftz -fp-speculation=safe -fp-model source -fPIC" --CXXFLAGS="-O3 -xCORE-AVX2 -ftz -fp-speculation=safe -fp-model source -fPIC" --FFLAGS="-O2 -xCORE-AVX2 -ftz -fp-speculation=safe -fp-model source -fPIC" --with-gnu-compilers=0 --with-mpi=1 --with-build-step-np=4 --with-shared-libraries=1 --with-debugging=0 --with-pic=1 --with-x=0 --with-windows-graphics=0 --with-fftw=1 --with-fftw-include=/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl/include/fftw --with-fftw-lib="[/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl/lib/intel64/libfftw3xc_intel_pic.a,libfftw3x_cdft_lp64_pic.a,libmkl_cdft_core.a,libmkl_blacs_intelmpi_lp64.a,libmkl_intel_lp64.a,libmkl_sequential.a,libmkl_core.a]" --with-scalapack=1 --with-scalapack-include=/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl/include --with-scalapack-lib="[/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl/lib/intel64/libmkl_scalapack_lp64.a,libmkl_blacs_intelmpi_lp64.a,libmkl_intel_lp64.a,libmkl_sequential.a,libmkl_core.a]" --with-blaslapack-lib="[/cluster/easybuild/broadwell/software/numlib/imkl/2018.2.199-iimpi-2018.02-GCC-6.3.0/mkl/lib/intel64/libmkl_intel_lp64.a,libmkl_sequential.a,libmkl_core.a]" --with-hdf5=1 --with-hdf5-dir=/cluster/easybuild/broadwell/software/data/HDF5/1.8.20-intel-2018.02
Once you successfully installed the correct version of PETSc, installing LaMEM should be straightforward. You can download the latest version of LaMEM with
git clone https://bitbucket.org/bkaus/lamem.git ./LaMEM
Next you need to specify the environmental variables PETSC_OPT and PETSC_DEB:
export PETSC_OPT=/opt/petsc/petsc-3.18.6-opt
export PETSC_DEB=/opt/petsc/petsc-3.18.6-deb
Note that this may need to be adapted, depending on the machine you use. You may also want to specify this in your .bashrc file.
Next you can install an optimized version of LaMEM by going to the /src directory in the LaMEM directory and typing:
make mode=opt all
At the end of the installation, it should report that the optimized version of LaMEM was built.
Similarly, you can install a debug version of LaMEM with
make mode=deb all
The binaries are located in:
/LaMEM/bin/opt/LaMEM
/LaMEM/bin/deb/LaMEM
You have successfully installed LaMEM and should check that everything works correctly by running the tests:
$ cd ../tests
$ make test
The first time you do this, it will download the python package pyTestHarness, by Dave May and Patrick Sanan, which we use for testing. If it fails to download it automatically, you may have to download it manually.
Next, we start the python script runLaMEM_tests.py, which runs the test-suite. The summary at the end should only show passed tests.
If this works, we are ready to run a first simulation. Navigate to the following directory:
$ cd ../input_models/BuildInSetups
The *.dat files in that directory (list them by typing ls on the command line) are standard LaMEM input files. To start a simulation, the only thing to do is to call the code:
$ mpiexec -n 1 ../../bin/opt/LaMEM -ParamFile FallingSpheres_Multigrid.dat
which should start the simulation and print the output of every timestep to the terminal.
The output of LaMEM is in VTK format, which can be read and visualized with any software that can handle this filetype. Our tool of choice is Paraview, a very well maintained package that runs on all systems and even allows you to do parallel rendering. We usually simply download the binaries from the webpage. If you want to render on a large-scale cluster instead, we recommend that you buy your system administrator a beer.
After opening, Paraview shows its main window.
You can open a LaMEM simulation by opening the *.pvd files in the directory from where you started the simulation. Hitting the "play" button will show you an animation of all available timesteps.
Having the correct graphics card is important for Paraview to run efficiently and create output. Paraview will also run without a graphics card, but not quite as smoothly. These instructions apply to workstations with Nvidia graphics cards. To check whether your Nvidia driver is up to date, type:
$ ubuntu-drivers devices
This results in a list of devices where you should see your Nvidia graphic card and drivers, e.g.:
== /sys/devices/pci0000:00/0000:00:05.0/0000:02:00.0 ==
modalias : pci:v000010DEd00001180sv00001043sd0000842Ebc03sc00i00
vendor   : NVIDIA Corporation
model    : GK104 [GeForce GTX 680]
driver   : nvidia-340 - distro non-free
driver   : nvidia-driver-396 - third-party free recommended
driver   : nvidia-driver-390 - third-party free
driver   : nvidia-304 - third-party free
driver   : xserver-xorg-video-nouveau - distro free builtin
To install the recommended driver for your card, type:
$ sudo ubuntu-drivers autoinstall
Alternatively to install a specific driver (e.g. nvidia-340):
$ sudo apt install nvidia-340
Then reboot to use the new driver.
In general, we try to follow many of the guidelines that PETSc has in place, even if we are somewhat less strict on some topics. Please have a look here for more detailed info.
The master branch contains all features and bug fixes that are believed to be stable. New feature branches should start from master. Note that we do not have LaMEM release versions yet, but will introduce them once we find time to come up with a more decent documentation of LaMEM.
Most external users of LaMEM do not have write access to LaMEM, to prevent mistakes from happening. Yet you can still contribute code in a rather straightforward manner by using forking. An overall description of what forking does is given here. Below, we give specific instructions.
What forking does is create a copy of LaMEM within your own bitbucket account on which you can do your own work, create branches etc. (or also give other access if you wish). Once you are ready to push a local branch back to LaMEM master, you can create a pull request.
In order to fork, please follow these steps:
1. Go to https://bitbucket.org/bkaus/lamem/src/master/
2. Click the + on the left side of the webpage and select Fork this repository. It will ask you to give it a Project Name (you could use LaMEM_Project, for example). You can make this repo private or public.
3. Click Clone and copy the clone command. Next go to your terminal and type
git clone https://<username>@bitbucket.org/username/lamem.git ./LaMEM
where username should be your Bitbucket username (done automatically if you copy it from the web interface).
4. cd ./LaMEM
5. git remote add upstream https://bitbucket.org/bkaus/lamem/src/master/
6. git pull upstream master
If you want to introduce a new feature to the code, you should always create a new branch for that.
The workflow is as follows:
Make sure you start from your local master by going to your local directory and typing
git checkout master
Alternatively, you can also push a button in the GUI (which is what we tend to do). Many of us use SourceTree, which is provided by bitbucket.
Download the main changes of LaMEM into your own copy of the code
git pull upstream master
Create and switch to a new feature branch:
git checkout -b <loginname>/<goal>-<short-description>
Here, goal should be either bugfix or feature, to clarify whether it is to fix a bug or to add a new feature to the code.
For example, the new feature branch of Andrea on passive tracers should be called
git checkout -b andrea_piccolo/feature-passive_tracers
Use all lowercase.
Write code
Inspect changes:
git status
or use one of the GUI's to do this
Regularly commit code with
git commit -a
or
git commit file1 file2
or
git add file1 file2
followed by git commit. Modified files can be added to a commit in the same way.
Push the feature branch from your local hard disk to your online bitbucket account, such that others (with access) can see it:
git push -u origin andrea_piccolo/feature-passive_tracers
(or equivalently, git push --set-upstream origin andrea_piccolo/feature-passive_tracers). Note that this step will still be in your own fork of LaMEM, and not in the main version of LaMEM.
Regularly merge master back into your feature or bugfix branch. This is easiest done with SourceTree. On a regular basis you should also pull the latest updates of the main LaMEM into your forked repository (step 2 above).
Once your branch is ready and you would like to push it back to the main version of LaMEM, you should create a Pull Request, as described below.
Some useful git commands:
- Switch to a branch: git checkout <branchname>, for example git checkout boris/feature-add_phase_transitions
- List all branches: git branch -a
- Inspect the remote repositories: git remote -v or git ls-remote; use git remote show origin for a complete summary.
- Delete a local branch: git branch -d <branchname> (only after the merge to master is complete)
- Delete a remote branch: git push origin :<branchname> (mind the colon in front of the branch name)
Note that LaMEM is an open-source code, and the GPL license states that changes you made to the code must be pushed back to LaMEM. We think this is fair, because we have spent a considerable amount of time developing it without having received specific funding to create an open-source community code. By pushing back your contributions to master, other users can benefit from your additions. If the additions are part of a paper that you would like to be cited, feel free to add the reference in the source code. The LaMEM development team will make sure that things in master work and that the tests keep on running. By adding appropriate tests for your features, they will keep working in the future as well. Our experience shows that if you don't do this, or wait too long to push changes back to master, you will find that it becomes increasingly difficult to keep your branch in line with LaMEM/master.
Before creating a pull request, please take the following points into account:
- Add a new test directory to LaMEM/tests and add the test itself to runLaMEM_Tests.py. You will have to create a python file for each new test directory, and will have to add *.expected files. Please make sure that these tests run reasonably fast, as they will otherwise significantly slow down the full testing framework (in most cases it is sufficient to have a low-resolution case for testing). Tests can also involve python based plotting or postprocessing, or even running MATLAB to create a setup, but this is not a requirement. If you do plotting and postprocessing, you need to make sure that these tests will also work on machines that do not have the python plotting packages installed. You can most likely get inspiration by looking at the existing examples.
- Run make test in the /LaMEM/tests directory before a pull request. All tests should pass; if not, ensure that they do (it may help to recompile from scratch with make mode=deb clean_all; make mode=deb all).
- Add new input parameters to LaMEM/input_models/input/lamem_input.dat. Note that the nomenclature of the new parameters must be unique and case-independent. It is thus not allowed to call a parameter K, since we already have thermal conductivity k. The reason for this is that new parameters are automatically part of the adjoint inversion framework, which otherwise gets confused. New parameters should also have a clearly recognizable name (so everything related to your new plume inflow boundary condition should be called something like Plume_). Note that any new parameter in the input file can also automatically be called/overruled from the command line.
- Check for memory leaks by running LaMEM with -log_view at the end. This prints a table with the number of creations and destructions of PETSc objects. The number of creations must be the same as the number of destructions. The only exception is the Viewer, as the log_view itself is also a viewer. If there is a mismatch, you likely forgot to do a Destroy somewhere. Note that it is more difficult to track down PetscMalloc statements without a corresponding PetscFree. Doing that is important, as otherwise the memory of a simulation will go up with every timestep, which ultimately makes the simulation run out of memory.
Once you are ready to push back your branch to the main version of LaMEM, you should create a pull request. Creating a pull request is best done through the bitbucket web page:
- Click branches on the left side and select your branch.
- Click Create Pull Request.
- Select bkaus/lamem and master as the destination.
- Click Create Pull Request.
Once the pull request is accepted, it will be merged into master and your branch will be closed. The tests will ensure that the new features keep working.
Microsoft Visual Studio Code is a relatively new IDE, which we found quite useful in combination with LaMEM. Here, we provide some instructions that helped us do this on a Mac. Linux instructions are likely similar (probably easier).
We assume that you have downloaded and installed VSC on your machine and installed the C/C++ package as well.
First, you will have to make sure that you have installed the gnu debugger (provided that your version of PETSc was installed with gcc compilers). It is of course important that the versions are compatible. One possibility is to install MacPorts, and install gdb with:
$ sudo port install gdb
Once installed, we have to ensure that it actually works on a Mac, as the safety precautions of macOS do not allow gdb to control other processes. Therefore, you need to codesign gdb by creating a certificate that allows it to control your executable.
An explanation of how that can be done is given here:
https://sourceware.org/gdb/wiki/BuildingOnDarwin#Givinggdbpermissiontocontrolotherprocesses%7Cofficial
Setups that were created in geomIO (https://bitbucket.org/geomio/geomio/src/master/) can easily be used as input geometries for LaMEM. If the options opt.getPolygons and opt.writePolygons are enabled in the geomIO file, a file named Polygons.bin will be created in the geomIO output directory.
% settings
inputFile = ['Simple.EW.svg'];
opt = geomIO_Options();
opt.inputDir = ['./Input'];
opt.outputDir = ['./Output'];
opt.inputFileName = inputFile;
opt.LaMEMinputFileName = 'Simple.dat';
opt.readLaMEM = true;
opt.writeParaview = true;
opt.writePolygons = true;
opt.interp = true;
opt.zi = [0:5:200];
opt.getPolygons = true;
opt.gravity.lenUnit = 'm';

% Density assignment
paths = {
    'Air',       0,    0
    'Sediments', 2500, 1
    'Crust',     2700, 2
    'Magma',     2400, 3
    };
opt.pathNames = {paths{:,1}};
opt.gravity.drho = [paths{:,2}];
opt.phase = [paths{:,3}];

% Run geomIO
[PathCoord,Volumes,opt] = run_geomIO(opt,'default');
Note that the polygon file depends on the number of elements and markers in each direction, so it is wise to label the files accordingly. Also note that the polygons in the .svg file should overlap to avoid voids in the geometry. They will later overwrite each other in the order specified in geomIO.m.
In the LaMEM input file, set:
msetup = polygons
poly_file = ./Output/Simple_32x32x32.bin
LaMEM is a parallel 3D numerical code that can be used to model various thermomechanical geodynamical processes such as mantle-lithosphere interaction for rocks that have visco-elasto-plastic rheologies. It was developed to better understand geological processes, particularly related to the dynamics of the crust and lithosphere and their interaction with the mantle. It can, however, also be used to solve geomechanical problems, includes (compressible) poroelasticity, can be used to compute gravity anomalies, and has an (adjoint) inversion framework. The code uses a marker-in-cell approach with a staggered finite difference discretization and is built on top of PETSc such that it can run on anything from a laptop to a massively parallel machine.
A range of (Galerkin) multigrid and iterative solvers are available, for both linear and non-linear rheologies, using Picard and quasi-Newton solvers (provided through the PETSc interface).
LaMEM has been tested on a variety of machines ranging from laptops to a massively parallel cluster with 458'752 cores.
LaMEM consists of the following directories:
/bin - Contains binaries after compilation (/deb contains the debug version and /opt the optimized)
/docs - Some documentation and the current webpage
/input_models - Various input models (run with ../bin/LaMEM -ParamFile *.dat). See the README file in that directory.
/matlab - Various matlab files to change initial mesh and read binary output into matlab.
/scripts - Various scripts, currently mainly related to Paraview VTK files.
/src - LaMEM source code; compile with "make mode=opt all" and "make mode=deb all"
/test - Directory with the testing framework.
/utils - Various non-essential files.
You can download pre-compiled binaries of LaMEM and directly use it (also in parallel), through the LaMEM julia package, as explained below. You can also compile LaMEM manually to get the latest update of the code.
For changing input files, logging in to remote machines, etc., we recommend Visual Studio Code, along with the remoteSSH and julia plugins.
The recommended way to install LaMEM on your machine (windows/mac/linux) is to use the julia package manager. For this, download a recent version of julia and start it.
julia> ]
pkg> add LaMEM
pkg> test LaMEM
Once this is installed, you can use it from within julia with:
julia> using LaMEM
(Note: use the backspace to go back from the package manager to the julia REPL.) Running a simulation can be done with
julia> ParamFile="../../input_models/BuildInSetups/FallingBlock_Multigrid.dat";
julia> run_lamem(ParamFile, 2, "-nstep_max 1")
This will run the simulation on 2 processors for 1 timestep. You do have to make sure that the path to the input *.dat file is correct on your system. Most likely you will have to change the path, which can be done with the built-in terminal (or PowerShell) in julia:
julia> ;
shell> cd ~/WORK/LaMEM/input_models/BuildInSetups/
If you wish, you can also directly run the downloaded binaries from your terminal without using julia. In that case you'll need to set the correct paths to the required binaries (LaMEM, mpiexec) and required dynamic libraries, which you can show with:
julia> show_paths_LaMEM()
LaMEM & mpiexec executables path : /Users/kausb/.julia/artifacts/26630bc992637321a5e5d3c0bc66005163370db6/bin:/Users/kausb/.julia/artifacts/483cb6f025b5a8266429afcb3f4ad498c58aaaee/bin
Dynamic libraries : /Applications/Julia-1.7.app/Contents/Resources/julia/lib/julia:
The LaMEM julia package has a number of other functions that may come in handy, such as reading timesteps back into julia.
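For example, a minimal sketch of reading a timestep back into julia. This assumes the reading functions of the LaMEM julia package (read_LaMEM_simulation and read_LaMEM_timestep; names, arguments and return values may differ between package versions), and assumes FB_multigrid is the output name of the FallingBlock_Multigrid.dat example:
julia> using LaMEM
julia> timesteps, filenames, times = read_LaMEM_simulation("FB_multigrid")  # list the available timesteps
julia> data, time = read_LaMEM_timestep("FB_multigrid", timesteps[end])     # read the fields of the last timestep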
If you want test some of the LaMEM examples in this repository, either clone the repo (below) or download it (three dots at the top of this page).
Limitations: whereas the pre-built libraries are quite handy, there are some limitations.
If you want, you can of course also compile LaMEM yourself, which will give you the latest version of the code. On large HPC clusters this is often necessary, as you need to link PETSc to the optimized MPI implementation on that system.
LaMEM crucially relies on PETSc, and to a minor extent on Python (for the testing framework).
We develop LaMEM on Linux and Mac machines, but we also have had success on Windows 10, where we recommend installing it through the (new) bash shell. In order for LaMEM to work, you'll need to install the correct version of PETSc first. PETSc is usually not backwards compatible, so it won't work with the incorrect version. Also please note that we do not always immediately update LaMEM to the latest version of PETSc, so you may have to download/install an older version.
PETSc: See http://www.mcs.anl.gov/petsc/petsc-as/documentation/installation.html for installation instructions. We also provide some installation instructions on how to compile PETSc (as well as mpich and gcc) on various machines in /doc/installation. The simplest manner is sometimes to let PETSc install all additional packages (like MPICH), but that often does not result in the most efficient code. You also have to make sure that the path is set correctly in that case (for MPICH, for example). On a large-scale cluster, you will have to link against the cluster libraries (for MPI, for example).
If you want to do debugging of LaMEM as well, it is a good idea to install both a DEBUG and an OPTIMIZED version of PETSc, in separate directories.
Nothing else - if the correct version of PETSc is installed, LaMEM will work fine.
$ git clone https://bkaus@bitbucket.org/bkaus/lamem.git ./LaMEM
export PETSC_OPT=DIRECTORY_WHERE_YOU_INSTALLED_YOUR_OPTIMIZED_PETSC
export PETSC_DEB=DIRECTORY_WHERE_YOU_INSTALLED_YOUR_DEBUG_PETSC
$ make mode=opt all
$ cd /tests
$ make test
Note that we use the PythonTestHarness framework, which is a set of Python scripts that simplify regression testing (developed by Dave May and Patrick Sanan). The first time you run the makefile, it will download the git repository and put it in the directory ./pythontestharness. If this fails for some reason, you can download it directly from Dave's bitbucket repository and put it in that directory. In that case, it will ask you which batch queuing system to use, which should be <none>.
Ideally, the tests should all run without problems. Depending on which external direct solver packages you installed, some may fail (for example, if you did not install PASTIX). The python test harness will give some hints as to where the issues came from.
In case you happen to have MATLAB installed on your system, additional tests will be performed in which an input setup is generated from within MATLAB. In order for this to work, you will have to let the system know where the MATLAB-binary is located by setting the following environmental variable (which can also be set in your .bashrc file):
$ which matlab
/path_to_your_matlab_library/matlab
$ export MATLAB=/path_to_your_matlab_library/matlab
$ make test
Note that the tests directory contains subdirectories named t?_*** (for example ./t1_FB1_Direct/, which contains a test for a Falling Block setup). Within each of these directories you will find a working LaMEM input file (*.dat) and a python file that runs the actual tests in that directory (such as test_1_FB1.py). Have a look at these files to learn more about how to run LaMEM.
You can run your first LaMEM simulation with
$ cd /input_models/BuildInSetups
$ ../../bin/opt/LaMEM -ParamFile FallingBlock_IterativeSolver.dat
which will run a setup that has a falling block of higher density embedded in a lower density fluid for 10 timesteps. Run the same in parallel with:
$ mpiexec -n 4 ../../bin/opt/LaMEM -ParamFile FallingBlock_IterativeSolver.dat
You can visualize the results with Paraview by opening the file FB_iterative.pvd and pushing the play button (after selecting the appropriate view, such as surface, and the appropriate field, such as velocity).
You can change the input parameters (such as resolution) by opening the file FallingBlock_IterativeSolver.dat with a text editor and changing the parameters. As noted above, parameters can also be overruled from the command line, as in the sketch below.
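A minimal julia sketch of this, reusing the run_lamem function shown earlier (nel_x/nel_y/nel_z are the element counts that also appear in the *.dat files; the values here are purely illustrative):
julia> ParamFile = "FallingBlock_IterativeSolver.dat";
julia> run_lamem(ParamFile, 1, "-nel_x 32 -nel_y 32 -nel_z 32 -nstep_max 2")  # 32^3 elements, 2 timesteps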
As we do not have an extensive user-guide yet (it takes time to create one, but it will come at some point), the best way to learn LaMEM is by looking at the input files in the order recommended in the README files. Start with /BuildInSetups, which shows various examples with geometries that are specified in the LaMEM input file.
In addition, you can also look at the Wiki page (left menu). This will be the location where we will add more extensive documentation on how to use LaMEM.
All possible input parameters in LaMEM are listed in the file /input_models/input/lamem_input.dat, which is worthwhile having a look at. Note that not all of these parameters have to be set (we select useful default options in most cases).
The main developers of the current version are:
Older versions of LaMEM included a finite element solver as well, and were developed by:
Development work was supported by the European Research Council, with ERC Starting Grant 258830, ERC Proof of Concept Grant 713397 and ERC Consolidator Grant 771143, as well as by BMBF projects SECURE and PERMEA awarded to Boris Kaus.
LaMEM is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.
LaMEM is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with LaMEM. If not, see http://www.gnu.org/licenses/.
Use the boundary conditions temp_top and temp_bot, as well as init_temp, to achieve a linear T-profile between the top and bottom of your domain. Side boundary conditions are always flux-free.
temp_top = 0
temp_bot = 1250
init_temp = 1
To get a steady-state temperature distribution depending on the thermal properties of your materials, you need to specify the thermal parameters k and Cp for all materials and activate the act_steady_temp and act_temp_diff flags.
temp_top = 0
temp_bot = 1250
act_steady_temp = 1
act_temp_diff = 1
...
<MaterialStart>
...
# Thermal parameters
  k = 3
  Cp = 1000
<MaterialEnd>
To assign an independent temperature to a material (like an intruding magma body), set the thermal parameter T for this material.
temp_top = 0
temp_bot = 1250
act_steady_temp = 1
act_temp_diff = 1
...
<MaterialStart>
...
# Thermal parameters
  T = 980
<MaterialEnd>
It is possible to allow for a certain amount of temperature diffusion between the anomalous material and its environment before the start of the forward model by using the steady_temp_dt parameter.
temp_top = 0
temp_bot = 1250
act_steady_temp = 1
act_temp_diff = 1
steady_temp_dt = 0.1
...
<MaterialStart>
...
# Thermal parameters
  T = 980
<MaterialEnd>
The right slice shows the temperature distribution with additional diffusion, whereas the left slice shows it without.
A temperature input file can be used to introduce a custom temperature field at the start of the simulation. The file can be created in Matlab as a 3D matrix for convenience. The matrix is expected to cover the entire model (not more and not less). The size of the matrix does not matter, but if you want to assign a temperature to a certain body, it is recommended to fit the marker resolution.
%% Setup temperature
% get bounds of LaMEM box
x_box = opt.setup.coord_x;
y_box = opt.setup.coord_y;
z_box = opt.setup.coord_z;

% fit marker distribution
nx = length(opt.setup.marker_x);
ny = length(opt.setup.marker_y);
nz = length(opt.setup.marker_z);

% initialize matrix
T3D = zeros(nx,ny,nz);

% set up coordinate grid (for assigning specific temperatures based on coordinates)
dx = (x_box(2)-x_box(1))/nx;
dy = (y_box(2)-y_box(1))/ny;
dz = (z_box(2)-z_box(1))/nz;
x_vec = (x_box(1)+dx/2:dx:x_box(2)-dx/2);
y_vec = (y_box(1)+dy/2:dy:y_box(2)-dy/2);
z_vec = (z_box(1)+dz/2:dz:z_box(2)-dz/2);
% DO NOT USE MESHGRID HERE!
[X,Y,Z] = ndgrid(x_vec,y_vec,z_vec);

%% setup linear temperature profile starting at the surface
z_surf = 3;
gradient = 30; % [K/km]
ind_surf = find(z_vec < z_surf);
T_vec = zeros(nz,1);
T_vec(ind_surf) = (z_surf - z_vec(ind_surf)) * gradient;

for i = 1 : nz
    T3D(:,:,i) = T_vec(i);
end

%% assign temperature to volume
vol = Volumes.Magma;
T_vol = 1180;

% run through all depths
for k = 1 : nz
    depth = z_vec(k);
    Slices = {};
    % find polygon slices at that depth
    for s = 1 : length(vol.Polygons)
        if vol.Polygons{s}(1,3) <= depth + dz/10 && vol.Polygons{s}(1,3) >= depth - dz/10
            Slices = [Slices; vol.Polygons(s)];  % append as cell to keep Slices a cell array
        end
    end

    % if slices of the body exist at that depth
    if ~isempty(Slices)
        allInds = false(nx,ny);
        % add all markers that lie inside the body
        for s = 1 : length(Slices)
            inds = inpolygon(X(:,:,k),Y(:,:,k),Slices{s}(:,1),Slices{s}(:,2));
            allInds = allInds | inds;
        end
        % assign the markers a temperature
        T_Slice = squeeze(T3D(:,:,k));
        T_Slice(allInds) = T_vol;
        T3D(:,:,k) = T_Slice;
    end

end

%% write binary to be read by LaMEM
% (Filename,[# of points in x-dir, # of points in y-dir, # of points in z-dir, Temp-Matrix in vector form])
petscBinaryWrite('T3D.dat',[nx;ny;nz;T3D(:)]);
In the LaMEM input file, set:
temp_file = ./T3D.dat
The options init_temp and act_steady_temp need to be disabled:
init_temp = 0
act_steady_temp = 0
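If you prefer julia over Matlab, a similar file can be written directly. The sketch below assumes that LaMEM expects the layout produced by petscBinaryWrite: a big-endian PETSc Vec header (the class id 1211214 and the vector length, both Int32), followed by the values [nx; ny; nz; T3D(:)] as big-endian Float64. Treat it as an illustration, not as a tested replacement for the Matlab script.
# minimal julia sketch, assuming the binary layout described above
function write_petsc_vec(filename, v::Vector{Float64})
    open(filename, "w") do io
        write(io, hton(Int32(1211214)))      # assumed VEC_FILE_CLASSID header
        write(io, hton(Int32(length(v))))    # number of entries
        foreach(x -> write(io, hton(x)), v)  # PETSc binary files are big-endian
    end
end

nx, ny, nz = 64, 64, 64                # illustrative marker resolution
z_vec = range(-100.0, 0.0; length=nz)  # example depth coordinates [km]
T3D = zeros(nx, ny, nz)
for k in 1:nz
    T3D[:, :, k] .= -z_vec[k] * 30.0   # linear profile with a 30 K/km gradient
end
write_petsc_vec("T3D.dat", vcat(Float64(nx), Float64(ny), Float64(nz), vec(T3D)))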
A topography file can be used to introduce an initial topography into a LaMEM model. This can be set up as a 2D matrix in Matlab. The matrix has to cover the entire LaMEM box, or can be bigger. The number of points in the matrix does not matter, but its dimensions have to be included in the file, as well as the coordinates of the SW corner and the grid spacing. Make sure that the units fit the LaMEM length unit (km for geo units, m for SI units).
%% Setup topography

% import your topography data
% Create example data:
Topo = peaks(301);
Topo = Topo/4;
Easting = (0:1:300);
Northing = (0:1:300);

% For orientation
% Topo(1,1): SW
% Topo(1,end): SE
% Topo(end,1): NW
% Topo(end,end): NE

% compute grid spacing
dx = (max(Easting) - min(Easting))/(length(Easting)-1);
dy = (max(Northing) - min(Northing))/(length(Northing)-1);

% possibly add offset in data
Easting = Easting - 200;
Northing = Northing - 100;

% transpose Topo to be read correctly by LaMEM
Topo = Topo';

% write binary to be read by LaMEM
% (FILENAME,[# of points in x-dir, # of points in y-dir, x-coord of SW corner, y-coord of SW corner,
%  grid spacing in x-dir, grid spacing in y-dir, TopoMatrix in vector form])
petscBinaryWrite('Topo.dat', [size(Topo,1); size(Topo,2); min(Easting); min(Northing); dx; dy; Topo(:)]);
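For julia users, an analogous sketch for the topography file, under the same (untested) assumption about the binary layout as in the temperature example above; the example topography itself is purely illustrative:
# minimal julia sketch; same assumed big-endian PETSc Vec layout as before
function write_petsc_vec(filename, v::Vector{Float64})
    open(filename, "w") do io
        write(io, hton(Int32(1211214)), hton(Int32(length(v))))  # assumed header
        foreach(x -> write(io, hton(x)), v)
    end
end

Easting  = 0.0:1.0:300.0   # coordinates in km (geo units)
Northing = 0.0:1.0:300.0
# example topography: a smooth hill; the first array dimension runs along Easting (x)
Topo = [exp(-((x - 150.0)^2 + (y - 150.0)^2) / 5000.0) for x in Easting, y in Northing]
header = Float64[length(Easting), length(Northing),
                 minimum(Easting), minimum(Northing), step(Easting), step(Northing)]
write_petsc_vec("Topo.dat", vcat(header, vec(Topo)))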