diff --git a/notebooks/nl-kunstwerken.ipynb b/notebooks/nl-kunstwerken.ipynb
index dc881b2..95cb1bd 100644
--- a/notebooks/nl-kunstwerken.ipynb
+++ b/notebooks/nl-kunstwerken.ipynb
@@ -192,7 +192,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.12.0"
+ "version": "3.12.1"
}
},
"nbformat": 4,
diff --git a/notebooks/rijkswaterstaat/1_create_bathymetrie.py b/notebooks/rijkswaterstaat/1_create_bathymetrie.py
new file mode 100644
index 0000000..7f03efc
--- /dev/null
+++ b/notebooks/rijkswaterstaat/1_create_bathymetrie.py
@@ -0,0 +1,236 @@
+# %%
+import itertools
+import math
+from functools import partial
+
+import fiona
+import geopandas as gpd
+import numpy as np
+import rasterio
+from geocube.api.core import make_geocube
+from geocube.rasterize import rasterize_points_griddata
+from rasterio import features, merge # noqa: F401
+from rasterio.enums import Resampling
+from rasterio.transform import from_origin
+from rasterio.windows import from_bounds
+from ribasim_nl import CloudStorage
+from shapely.geometry import MultiPolygon, box
+
+cloud = CloudStorage()
+
+
+out_dir = cloud.joinpath("Rijkswaterstaat", "verwerkt", "bathymetrie")
+out_dir.mkdir(exist_ok=True)
+baseline_file = cloud.joinpath(
+ "baseline-nl_land-j23_6-v1", "baseline.gdb"
+)  # this file is read-only for D2HYDRO due to the verwerkersovereenkomst (data-processing agreement)
+layer = "bedlevel_points"
+
+krw_poly_gpkg = cloud.joinpath(
+ "Basisgegevens", "KRW", "krw_oppervlaktewaterlichamen_nederland_vlakken.gpkg"
+)
+
+bathymetrie_nl = cloud.joinpath("Rijkswaterstaat", "aangeleverd", "bathymetrie")
+
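+# raster parameters: 5 m cells, 10 km processing tiles, model bounds in
+# RD New (EPSG:28992) and a nodata sentinel for missing bed levels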
+res = 5
+tile_size = 10000
+bounds = (0, 300000, 320000, 660000)
+nodata = -9999
+
+# %%
+
+datasets = [rasterio.open(i) for i in bathymetrie_nl.glob("*NAP.tif")]
+
+data, transform = rasterio.merge.merge(
+ datasets, bounds=bounds, res=(res, res), nodata=nodata
+)
+
+data = np.where(data != nodata, data * 100, nodata).astype("int16")
+
+profile = {
+ "driver": "GTiff",
+ "dtype": "int16",
+ "nodata": nodata,
+ "width": data.shape[2],
+ "height": data.shape[1],
+ "count": 1,
+ "crs": 28992,
+ "transform": transform,
+ "blockxsize": 256,
+ "blockysize": 256,
+ "compress": "deflate",
+}
+
+with rasterio.open(out_dir / "bathymetrie-nl.tif", mode="w", **profile) as dst:
+ dst.write(data)
+ dst.build_overviews([5, 20], Resampling.average)
+ dst.update_tags(ns="rio_overview", resampling="average")
+ dst.scales = (0.01,)
+
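+# Bed levels are stored as whole centimeters in int16 with scales=(0.01,), so
+# readers can recover meters. A minimal read-back sketch:
+#
+#   with rasterio.open(out_dir / "bathymetrie-nl.tif") as src:
+#       bedlevel_m = src.read(1) * src.scales[0]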
+
+# %%
+print("read mask")
+water_geometries = gpd.read_file(out_dir / "water-mask.gpkg", engine="pyogrio")
+with fiona.open(baseline_file, layer=layer) as src:
+ xmin, ymin, xmax, ymax = src.bounds
+ xmin = math.floor(xmin / tile_size) * tile_size
+ ymin = math.floor(ymin / tile_size) * tile_size
+ xmax = math.ceil(xmax / tile_size) * tile_size
+ ymax = math.ceil(ymax / tile_size) * tile_size
+ xmins = list(range(xmin, xmax, tile_size))
+ ymins = list(range(ymin, ymax, tile_size))
+ xymins = itertools.product(xmins, ymins)
+ transform = from_origin(xmin, ymax, res, res)
+
+# %%
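+# rasterize one 10x10 km tile at a time to bound memory use; xymins holds the
+# lower-left corner of every tile covering the snapped bounds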
+with rasterio.open(
+ out_dir / f"{layer}_{xmin}_{ymin}_{xmax}_{ymax}.tif",
+ mode="w",
+ driver="GTiff",
+ dtype="int16",
+ nodata=-32768.0,
+ count=1,
+ crs=28992,
+ compress="lzw",
+ tiled=True,
+ width=int((xmax - xmin) / res),
+ height=int((ymax - ymin) / res),
+ transform=transform,
+) as dst:
+ profile = dst.profile
+ dst_bounds = dst.bounds
+ for xmin, ymin in xymins:
+ xmax = xmin + tile_size
+ ymax = ymin + tile_size
+
+ print(f"doing {xmin}, {ymin}, {xmax}, {ymax}")
+
+ bounds = (xmin, ymin, xmax, ymax)
+ area_poly = box(*bounds)
+ print("select water-mask geometries")
+ water_geometries_select = water_geometries[
+ water_geometries.intersects(area_poly)
+ ]
+ if not water_geometries_select.empty:
+ area_poly_buffer = area_poly.buffer(res * 4)
+ print("read points")
+ gdf = gpd.read_file(
+ baseline_file,
+ layer=layer,
+ engine="pyogrio",
+ bbox=area_poly_buffer.bounds,
+ )
+ gdf = gdf.loc[(gdf.ELEVATION > -60) & (gdf.ELEVATION < 50)]
+
+ if not gdf.empty:
+ # we drop duplicated points within the same meter
+ print("drop duplicates")
+ gdf["x"] = gdf.geometry.x.astype(int)
+ gdf["y"] = gdf.geometry.y.astype(int)
+ gdf.drop_duplicates(["x", "y"], inplace=True)
+
+ print("make cube")
+ cube = make_geocube(
+ gdf,
+ measurements=["ELEVATION"],
+ resolution=(res, -res),
+ rasterize_function=partial(
+ rasterize_points_griddata, method="linear"
+ ),
+ interpolate_na_method="nearest",
+ )
+
+ print("scale cube")
+ cube["ELEVATION"] = (cube.ELEVATION * 100).astype("int16")
+ cube.ELEVATION.attrs["_FillValue"] = -32768
+ cube.ELEVATION.attrs["scale_factor"] = 0.01
+
+ print("clip cube")
+ mask = water_geometries_select.geometry.unary_union.intersection(
+ area_poly
+ )
+ convex_hull = gdf.unary_union.convex_hull
+ if isinstance(mask, MultiPolygon):
+ mask = [
+ i.intersection(convex_hull)
+ for i in mask.geoms
+ if i.intersects(convex_hull)
+ ]
+
+ else: # is one polygon
+ mask = [mask.intersection(convex_hull)]
+
+ cube = cube.rio.clip(mask)
+
+ if cube.ELEVATION.size > 0:
+ print("add to tiff")
+ window = from_bounds(*cube.rio.bounds(), transform)
+ dst.write(
+ np.fliplr(np.flipud(cube.ELEVATION)), window=window, indexes=1
+ )
+ else:
+ print("no cube.ELEVATION points within water-mask")
+ else:
+ print("no samples within boundary")
+ else:
+ print("no water-mask within bounds")
+
+ dst.build_overviews([5, 20], Resampling.average)
+ dst.update_tags(ns="rio_overview", resampling="average")
+ dst.scales = (0.01,)
+
+# %%
+
+# read bathymetrie-nl and its spatial characteristics
+with rasterio.open(out_dir / "bathymetrie-nl.tif") as src:
+ bathymetry_nl_data = src.read(1)
+ bathymetry_nl_data = np.where(
+ bathymetry_nl_data == src.nodata, nodata, bathymetry_nl_data
+ )
+ bounds = src.bounds
+ transform = src.transform
+ profile = src.profile
+ scales = src.scales
+
+
+with rasterio.open(out_dir / "bedlevel_points_-20000_300000_320000_660000.tif") as src:
+ window = from_bounds(*bounds, src.transform)
+ baseline_data = src.read(1, window=window)
+ baseline_data = np.where(baseline_data == src.nodata, nodata, baseline_data)
+
+
+# data = baseline_data
+
+shapes = (i.geometry for i in water_geometries.itertuples() if i.baseline)
+
+baseline_mask = rasterio.features.rasterize(
+ shapes,
+ out_shape=bathymetry_nl_data.shape,
+ transform=transform,
+ fill=0,
+ default_value=1,
+ all_touched=True,
+ dtype="int8",
+).astype(bool)
+
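+# where a water geometry is flagged `baseline`, prefer the interpolated
+# baseline bed levels; elsewhere keep the measured bathymetry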
+data = np.where(baseline_mask, baseline_data, bathymetry_nl_data)
+
+profile = {
+ "driver": "GTiff",
+ "dtype": "int16",
+ "nodata": nodata,
+ "width": data.shape[1],
+ "height": data.shape[0],
+ "count": 1,
+ "crs": 28992,
+ "transform": transform,
+ "blockxsize": 256,
+ "blockysize": 256,
+ "compress": "deflate",
+}
+
+with rasterio.open(out_dir / "bathymetrie-merged.tif", mode="w", **profile) as dst:
+ dst.write(data, 1)
+ dst.build_overviews([5, 20], Resampling.average)
+ dst.update_tags(ns="rio_overview", resampling="average")
+ dst.scales = (0.01,)
diff --git a/notebooks/rijkswaterstaat/2_krw_naar_basins.ipynb b/notebooks/rijkswaterstaat/2_krw_naar_basins.ipynb
new file mode 100644
index 0000000..6ee162e
--- /dev/null
+++ b/notebooks/rijkswaterstaat/2_krw_naar_basins.ipynb
@@ -0,0 +1,174 @@
+{
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import geopandas as gpd\n",
+ "import pandas as pd\n",
+ "from ribasim_nl import CloudStorage\n",
+ "from ribasim_nl.geodataframe import join_by_poly_overlay, split_basins\n",
+ "from ribasim_nl.raster import sample_level_area\n",
+ "\n",
+ "cloud = CloudStorage()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "## Processing KRW surface water bodies into basins\n",
+    "- Split `krw_oppervlaktewaterlichamen_nederland_vlakken.gpkg` using `krw_split_lijnen.gpkg`\n",
+    "- Add `waterlichaam` names from `oppervlaktewaterlichamen_rijk.gpkg`"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "exclude_owmident = [\n",
+ " \"NL95_1A\",\n",
+ " \"NL95_2A\",\n",
+ " \"NL95_3A\",\n",
+ " \"NL95_4A\",\n",
+ " \"NL81_1\",\n",
+ " \"NL81_10\",\n",
+ " \"NL81_3\",\n",
+ " \"NL81_2\",\n",
+ " \"NL89_zwin\",\n",
+ "]\n",
+ "krw_poly_gdf = gpd.read_file(\n",
+ " cloud.joinpath(\n",
+ " \"Basisgegevens\", \"KRW\", \"krw_oppervlaktewaterlichamen_nederland_vlakken.gpkg\"\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "krw_poly_gdf = krw_poly_gdf.loc[~krw_poly_gdf.owmident.isin(exclude_owmident)]\n",
+ "krw_poly_gdf = krw_poly_gdf.explode(index_parts=False)\n",
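+    "# keep only the largest polygon per owmident\n",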
+ "krw_poly_gdf[\"shapeArea\"] = krw_poly_gdf.area\n",
+    "krw_poly_gdf.sort_values(\"shapeArea\", ascending=False, inplace=True)\n",
+ "krw_poly_gdf.drop_duplicates(\"owmident\", keep=\"first\", inplace=True)\n",
+ "\n",
+ "krw_poly_gdf.to_file(\n",
+ " cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"hdb.gpkg\"), layer=\"krw_waterlichamen\"\n",
+ ")\n",
+ "krw_split_lines_gdf = gpd.read_file(\n",
+ " cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"model_user_data.gpkg\"),\n",
+ " layer=\"krw_split_lijnen\",\n",
+ ")\n",
+ "\n",
+ "\n",
+ "rws_opp_poly_gdf = gpd.read_file(\n",
+ " cloud.joinpath(\n",
+ " \"Rijkswaterstaat\", \"aangeleverd\", \"oppervlaktewaterlichamen_rijk.gpkg\"\n",
+ " )\n",
+ ")\n",
+ "\n",
+ "krw_poly_gdf = pd.concat(\n",
+ " [krw_poly_gdf, rws_opp_poly_gdf[rws_opp_poly_gdf.waterlichaam == \"Maximakanaal\"]]\n",
+ ")\n",
+ "rws_krw_poly_gdf = split_basins(krw_poly_gdf, krw_split_lines_gdf)\n",
+ "\n",
+ "\n",
+ "rws_krw_poly_gdf = join_by_poly_overlay(\n",
+ " rws_krw_poly_gdf,\n",
+ " rws_opp_poly_gdf[[\"waterlichaam\", \"geometry\"]],\n",
+ " select_by=\"poly_area\",\n",
+ ")\n",
+ "\n",
+ "merge_basin_polygons_gdf = gpd.read_file(\n",
+ " cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"model_user_data.gpkg\"),\n",
+ " layer=\"merge_basin_polygons\",\n",
+ ")\n",
+ "\n",
+ "merge_lines_gdf = gpd.read_file(\n",
+ " cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"model_user_data.gpkg\"),\n",
+ " layer=\"krw_merge_lijnen\",\n",
+ ")\n",
+ "\n",
+ "for row in rws_krw_poly_gdf[\n",
+ " rws_krw_poly_gdf.intersects(merge_basin_polygons_gdf.unary_union)\n",
+ "].itertuples():\n",
+ " geometry = row.geometry\n",
+ " merge_poly_gdf = merge_basin_polygons_gdf[\n",
+ " merge_basin_polygons_gdf.buffer(-10).intersects(geometry)\n",
+ " ]\n",
+ " if not merge_poly_gdf.empty:\n",
+ " merge_geometry = merge_poly_gdf.unary_union\n",
+ " rws_krw_poly_gdf.loc[row.Index, [\"geometry\"]] = geometry.union(merge_geometry)\n",
+ "\n",
+ "\n",
+ "# %%\n",
+ "\n",
+ "for line in merge_lines_gdf.geometry:\n",
+ " point_from, point_to = line.boundary.geoms\n",
+ " idx_from = rws_krw_poly_gdf[rws_krw_poly_gdf.contains(point_from)].index[0]\n",
+ " idx_to = rws_krw_poly_gdf[rws_krw_poly_gdf.contains(point_to)].index[0]\n",
+ " rws_krw_poly_gdf.loc[idx_to, [\"geometry\"]] = rws_krw_poly_gdf.loc[\n",
+ " [idx_from, idx_to]\n",
+ " ].unary_union\n",
+ " rws_krw_poly_gdf = rws_krw_poly_gdf[rws_krw_poly_gdf.index != idx_from]\n",
+ "\n",
+ "\n",
+ "rws_krw_poly = cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"krw_basins_vlakken.gpkg\")\n",
+ "\n",
+ "rws_krw_poly_gdf[\"basin_id\"] = rws_krw_poly_gdf.reset_index().index + 1\n",
+ "rws_krw_poly_gdf.to_file(rws_krw_poly)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "## Creating level-area relations"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "raster_path = cloud.joinpath(\n",
+ " \"Rijkswaterstaat\", \"verwerkt\", \"bathymetrie\", \"bathymetrie-merged.tif\"\n",
+ ")\n",
+ "\n",
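+    "# sample_level_area samples the merged bathymetry raster into a level-area relation per basin polygon\n",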
+ "dfs = [\n",
+ " sample_level_area(raster_path, row.geometry, ident=row.basin_id)\n",
+ " for row in rws_krw_poly_gdf.itertuples()\n",
+ "]\n",
+ "\n",
+ "df = pd.concat(dfs)\n",
+ "\n",
+ "df.to_csv(\n",
+ " cloud.joinpath(\"Rijkswaterstaat\", \"verwerkt\", \"krw_basins_vlakken_level_area.csv\")\n",
+ ")"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.12.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/notebooks/rijkswaterstaat/3__kunstwerk.py b/notebooks/rijkswaterstaat/3__kunstwerk.py
new file mode 100644
index 0000000..62fc9a3
--- /dev/null
+++ b/notebooks/rijkswaterstaat/3__kunstwerk.py
@@ -0,0 +1,215 @@
+# %%
+# We extract hydraulic structures (kunstwerken) for the main water system from RWS data. Method:
+# 1. Take the structures from Rijkswaterstaat's Netwerk Informatie Systeem (NIS) (nis_all_kunstwerken_hws_2019.gpkg)
+# 2. Select the structures that fall within the KRW water bodies
+# 3. Add extra structures that are missing from the NIS but required for modelling the main water system.
+#    These extra structures come from baseline.gdb (layer "structure_lines") and from the RWS legger (kunstwerken_primaire_waterkeringen).
+#    Drinking-water intakes are added as well.
+
+import geopandas as gpd
+import pandas as pd
+from ribasim_nl import CloudStorage
+
+cloud = CloudStorage()
+
+
+# %%
+def photo_url(code):
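+    """Return the photo URL for a structure code, or a placeholder when none is known."""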
+ if code in kwk_media.index:
+ return kwk_media.at[code, "photo_url"] # noqa: PD008
+ else:
+ return r"https://www.hydrobase.nl/static/icons/photo_placeholder.png"
+
+
+krw_lichaam = cloud.joinpath(
+ "Basisgegevens", "KRW", "krw_oppervlaktewaterlichamen_nederland_vlakken.gpkg"
+)
+
+nis_hws = cloud.joinpath(
+ "Rijkswaterstaat", "aangeleverd", "NIS", "nis_all_kunstwerken_hws_2019.gpkg"
+)
+baseline = cloud.joinpath("baseline-nl_land-j23_6-v1", "baseline.gdb")
+nis_hwvn = cloud.joinpath(
+ "Rijkswaterstaat", "aangeleverd", "NIS", "nis_alle_kunstwerken_hwvn_2019.gpkg"
+)
+primaire_kunstwerken = cloud.joinpath(
+ "Rijkswaterstaat", "aangeleverd", "kunstwerken_primaire_waterkeringen.gpkg"
+)
+
+drinkwater = cloud.joinpath("drinkwaterbedrijven", "drinkwater_inlets.gpkg")
+
+# load media
+kwk_media = pd.read_csv(cloud.joinpath("Rijkswaterstaat", "verwerkt", "kwk_media.csv"))
+kwk_media.set_index("code", inplace=True)
+
+# Load GeoDataFrames
+(
+ krw_lichaam_gdf,
+ nis_hws_gdf,
+ nis_hwvn_gdf,
+ primaire_kunstwerken_gdf,
+) = (
+ gpd.read_file(file)
+ for file in [krw_lichaam, nis_hws, nis_hwvn, primaire_kunstwerken]
+)
+drinkwater_gdf = gpd.read_file(drinkwater, layer="inlet__netherlands")
+baseline_kunstwerken_gdf = gpd.read_file(baseline, layer="structure_lines")
+
+# Spatial joins
+baseline_in_model = gpd.sjoin_nearest(
+ baseline_kunstwerken_gdf, krw_lichaam_gdf, how="inner", max_distance=50
+)
+selected_hws_points = gpd.sjoin_nearest(
+ nis_hws_gdf, krw_lichaam_gdf, how="inner", max_distance=100
+)
+selected_hwvn_points = gpd.sjoin_nearest(
+ nis_hwvn_gdf, krw_lichaam_gdf, how="inner", max_distance=20
+)
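+# note: the max_distance thresholds (50/100/20 m) differ per source, presumably
+# matching the positional accuracy of each dataset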
+
+# Combine selected points
+selected_points = pd.concat([selected_hws_points, selected_hwvn_points])
+
+# Filter points
+desired_kw_soort = [
+ "Stuwen",
+ "Stormvloedkeringen",
+ "Spuisluizen",
+ "Gemalen",
+ "keersluizen",
+ "Waterreguleringswerken",
+ "Schutsluizen",
+]
+filtered_points = selected_points[
+ selected_points["kw_soort"].isin(desired_kw_soort)
+].copy()
+
+# Calculate the distance to the nearest baseline structure; compute the
+# unary_union once instead of once per row
+baseline_union = baseline_in_model.unary_union
+filtered_points.loc[:, ["nearest_distance"]] = filtered_points.geometry.distance(
+    baseline_union
+)
+
+# Filter points based on distance
+filtered_nearest_points = filtered_points[
+ filtered_points["nearest_distance"] < 500
+].copy()
+
+# Remove specific values
+filtered_nearest_points = filtered_nearest_points[
+ ~filtered_nearest_points["complex_code"].isin(["40D-350", "48H-353", "42D-001"])
+]
+filtered_nearest_points = filtered_nearest_points.drop(["index_right"], axis=1)
+
+
+# Additional structures from Baseline, the primaire waterkeringen and NIS, plus drinking-water inlets
+def name_from_baseline(string):
+ last_underscore_index = string.rfind("_")
+ if last_underscore_index != -1:
+ last_underscore_index += 1
+ return string[last_underscore_index:]
+ else:
+ return string
+
+
+baseline_kwk_add_gdf = baseline_kunstwerken_gdf[
+ baseline_kunstwerken_gdf["NAME"].isin(
+ [
+ "DM_68.30_R_US_Reevespuisluis",
+ "OS_SK_Oosterscheldekering39_Geul-van-Roggenplaat",
+ "KR_C_HK_Kadoelen",
+ ]
+ )
+].copy()
+
+baseline_kwk_add_gdf.loc[:, ["geometry"]] = baseline_kwk_add_gdf.centroid
+baseline_kwk_add_gdf.loc[:, ["kw_naam"]] = baseline_kwk_add_gdf["NAME"].apply(
+ name_from_baseline
+)
+baseline_kwk_add_gdf.loc[:, ["bron"]] = "baseline"
+baseline_kwk_add_gdf.loc[:, ["complex_naam"]] = baseline_kwk_add_gdf["kw_naam"]
+baseline_kwk_add_gdf.loc[:, ["kw_code"]] = baseline_kwk_add_gdf["NAME"]
+
+primaire_kwk_add_gdf = primaire_kunstwerken_gdf[
+ primaire_kunstwerken_gdf["kd_code1"].isin(["KD.32.gm.015", "KD.44.gm.001"])
+].copy()
+
+primaire_kwk_add_gdf.rename(
+ columns={"kd_code1": "kw_code", "kd_naam": "kw_naam", "naam_compl": "complex_naam"},
+ inplace=True,
+)
+# %%
+# Set source labels; drinking water inlets come from OSM
+primaire_kwk_add_gdf.loc[:, ["bron"]] = "kunstwerken primaire waterkeringen"
+drinkwater_gdf.loc[:, ["bron"]] = "OSM"
+drinkwater_gdf.rename(
+ columns={
+ "osm_id": "kw_code",
+ "name": "kw_naam",
+ "operator": "beheerder",
+ },
+ inplace=True,
+)
+
+additional_points = pd.concat(
+ [
+ primaire_kwk_add_gdf,
+ nis_hws_gdf[
+ nis_hws_gdf["complex_code"].isin(
+ [
+ "49D-400",
+ "44D-002",
+ "58C-001",
+ "45B-352",
+ "33F-001",
+ "21G-350",
+ "58D-001",
+ "51F-001",
+ ]
+ )
+ ],
+ nis_hwvn_gdf[
+ nis_hwvn_gdf["complex_code"].isin(
+ [
+ "49D-400",
+ "44D-002",
+ "58C-001",
+ "45B-352",
+ "33F-001",
+ "21G-350",
+ "58D-001",
+ "51F-001",
+ ]
+ )
+ ],
+ baseline_kwk_add_gdf,
+ drinkwater_gdf,
+ ]
+)
+
+# add_baseline = gpd.sjoin_nearest(
+# baseline_kunstwerken_gdf, filtered_nearest_points, how="inner", max_distance=1000
+# )
+
+# Concatenate additional points
+final_filtered_points = pd.concat([filtered_nearest_points, additional_points])
+
+final_filtered_points.rename(
+ columns={"kw_naam": "naam", "kw_code": "code"}, inplace=True
+)
+
+final_filtered_points.loc[final_filtered_points["bron"].isna(), ["bron"]] = "NIS"
+# Add the lines from baseline_in_model to final_filtered_points
+
+# add photo_url
+final_filtered_points.loc[:, ["photo_url"]] = final_filtered_points["code"].apply(
+ photo_url
+)
+
+# Save results to GeoPackage files
+output_file = cloud.joinpath("Rijkswaterstaat", "verwerkt", "hydamo.gpkg")
+final_filtered_points.to_file(
+ output_file, layer="kunstwerken", driver="GPKG", engine="pyogrio"
+)
+
+
+# %%
diff --git a/notebooks/rijkswaterstaat/4_build_model.py b/notebooks/rijkswaterstaat/4_build_model.py
new file mode 100644
index 0000000..bc1a029
--- /dev/null
+++ b/notebooks/rijkswaterstaat/4_build_model.py
@@ -0,0 +1,647 @@
+# %% init
+import geopandas as gpd
+import pandas as pd
+import ribasim
+from ribasim_nl import CloudStorage, Network, reset_index
+from ribasim_nl.rating_curve import read_rating_curve
+from ribasim_nl.verdeelsleutels import (
+ read_verdeelsleutel,
+ verdeelsleutel_to_fractions,
+)
+
+cloud = CloudStorage()
+node_list = []
+edge_list = []
+
+boundaries_passed = []
+PRECIPITATION = 0.005 / 86400 # m/s
+EVAPORATION = 0.001 / 86400 # m/s
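+# i.e. 5 mm/day precipitation and 1 mm/day evaporation as constant fluxes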
+
+network = Network.from_network_gpkg(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "netwerk.gpkg")
+)
+boundary_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "model_user_data.gpkg"),
+ engine="pyogrio",
+ layer="boundary",
+ fid_as_index=True,
+)
+
+structures_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "hydamo.gpkg"),
+ layer="kunstwerken",
+ engine="pyogrio",
+ fid_as_index=True,
+)
+
+basin_poly_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_vlakken.gpkg"),
+ engine="pyogrio",
+ fid_as_index=True,
+)
+
+structures_df = pd.read_excel(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "kunstwerk_complexen.xlsx")
+)
+
+level_area_df = pd.read_csv(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_vlakken_level_area.csv")
+)
+
+rating_curves_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "rating_curves.gpkg"),
+ engine="pyogrio",
+ fid_as_index=True,
+)
+
+verdeelsleutel_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "verdeelsleutel.gpkg"),
+ engine="pyogrio",
+ fid_as_index=True,
+)
+
+verdeelsleutel_df = read_verdeelsleutel(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "verdeelsleutel_driel.xlsx")
+)
+
+
+# %% define nodes and links
+nodes_gdf = network.nodes
+links_gdf = network.links
+network_union_lines = links_gdf.unary_union
+network.overlay(basin_poly_gdf[["basin_id", "geometry"]])
+
+basin_admin = {
+ i: {"nodes_from": [], "nodes_to": [], "neighbors": []} for i in basin_poly_gdf.index
+}
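+# basin_admin tracks, per basin polygon, which nodes flow into/out of the
+# basin and which neighboring basins it connects to; edges are derived from it later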
+
+
+for row in rating_curves_gdf.itertuples():
+ basin_id = basin_poly_gdf[basin_poly_gdf.contains(row.geometry)].index[0]
+ basin_admin[basin_id]["rating_curve"] = row.curve_id
+
+# %%
+boundary_gdf["node_id"] = boundary_gdf["geometry"].apply(
+ lambda x: network.move_node(
+ x,
+ max_distance=10,
+ allign_distance=10,
+ node_types=["downstream_boundary", "connection", "upstream_boundary"],
+ )
+)
+
+
+def get_structure_codes(structures_gdf, complex_codes, line_string):
+ """Return list of nearest structure_codes from a list of complex_codes and a linestring."""
+ structure_codes = []
+ for complex_code, gdf in structures_gdf[
+ structures_gdf.complex_code.isin(complex_codes)
+ ].groupby("complex_code"):
+ gdf_select = gdf[
+ gdf.kw_soort.isin(
+ [
+ "Stuwen",
+ "Spuisluizen",
+ "Stormvloedkeringen",
+ "keersluizen",
+ "Gemalen",
+ ]
+ )
+ ]
+ if gdf_select.empty:
+ gdf_select = gdf[gdf.kw_soort == "Schutsluizen"]
+ if gdf_select.empty:
+ raise Exception(f"kan geen kunstwerk vinden voor {complex_code}")
+ structure_codes += [
+ gdf_select.at[
+ gdf_select.distance(line_string).sort_values().index[0], "code"
+ ]
+ ]
+ return structure_codes
+
+
+def get_type_and_value(code, structures_gdf, structures_df):
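+    """Return (ribasim_type, ribasim_waarde) for a structure code, falling back to its complex code."""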
+ if code not in structures_df.code.to_numpy():
+ complex_code = structures_gdf.set_index("code").loc[code].complex_code
+ row = structures_df.set_index("code").loc[complex_code]
+ else:
+ row = structures_df.set_index("code").loc[code]
+ return row.ribasim_type, row.ribasim_waarde
+
+
+# %% build the list of structure codes
+structure_codes = structures_df[structures_df.code.isin(structures_gdf.code)][
+ "code"
+].to_list()
+
+complex_codes = structures_df[~structures_df.code.isin(structures_gdf.code)][
+ "code"
+].to_list()
+structure_codes += get_structure_codes(
+ structures_gdf,
+ complex_codes=complex_codes,
+ line_string=network_union_lines,
+)
+
+
+# % iterate over structures
+kwk_select_gdf = structures_gdf[structures_gdf.code.isin(structure_codes)]
+
+for kwk in kwk_select_gdf.itertuples():
+ # get network_node_id
+ node_id = network.move_node(
+ kwk.geometry,
+ max_distance=150,
+ allign_distance=100,
+ node_types=["connection", "upstream_boundary"],
+ )
+ if node_id is None:
+ node_id = network.add_node(kwk.geometry, max_distance=150, allign_distance=100)
+
+ # add to node_list
+ node_type, node_value = get_type_and_value(kwk.code, structures_gdf, structures_df)
+
+ if network.has_upstream_nodes(node_id) and network.has_downstream_nodes(
+ node_id
+ ): # normal case, structure is between basins
+ upstream, downstream = network.get_upstream_downstream(node_id, "basin_id")
+ if upstream is not None:
+ basin_admin[upstream]["nodes_to"] += [node_id]
+ if downstream is not None:
+ basin_admin[upstream]["neighbors"] += [downstream]
+ else:
+ raise ValueError(
+                    f"Kan geen boven- en benedenstroomse knoop vinden voor {kwk.naam}"
+ )
+ if downstream is not None:
+ basin_admin[downstream]["nodes_from"] += [node_id]
+ if upstream is not None:
+ basin_admin[downstream]["neighbors"] += [upstream]
+ else:
+ print(f"{kwk.naam} is een outlet")
+ boundary = boundary_gdf.loc[
+ boundary_gdf[boundary_gdf["type"] == "LevelBoundary"]
+ .distance(kwk.geometry)
+ .sort_values()
+ .index[0]
+ ]
+ boundaries_passed += [boundary.name]
+ node_list += [
+ {
+ "type": "LevelBoundary",
+ "value": boundary.level,
+ "is_structure": False,
+ "code_waterbeheerder": boundary.code,
+ "waterbeheerder": "Rijkswaterstaat",
+ "name": boundary.naam,
+ "node_id": boundary.node_id,
+ "geometry": network.nodes.at[boundary.node_id, "geometry"],
+ }
+ ]
+ print(f"Processing node_id: {node_id}")
+ edge_list += [
+ {
+ "from_node_id": node_id,
+ "to_node_id": boundary.node_id,
+ "edge_type": "flow",
+ "geometry": network.get_line(node_id, boundary.node_id),
+ }
+ ]
+
+ elif network.has_downstream_nodes(node_id):
+ first, second = network.get_downstream(node_id, "basin_id")
+ if first is not None:
+ basin_admin[first]["nodes_from"] += [node_id]
+ if second is not None:
+ basin_admin[first]["neighbors"] += [second]
+ else:
+ raise ValueError(f"{kwk.naam} heeft problemen")
+ if second is not None:
+ basin_admin[second]["nodes_from"] += [node_id]
+ if first is not None:
+ basin_admin[second]["neighbors"] += [first]
+ else:
+ raise ValueError(f"{kwk.naam} heeft problemen")
+
+ node_list += [
+ {
+ "type": node_type,
+ "value": node_value,
+ "is_structure": True,
+ "code_waterbeheerder": kwk.code,
+ "waterbeheerder": "Rijkswaterstaat",
+ "name": kwk.naam,
+ "node_id": node_id,
+ "geometry": network.nodes.at[node_id, "geometry"],
+ }
+ ]
+
+
+# %%
+for basin in basin_poly_gdf.itertuples():
+ # for basin in basin_poly_gdf.loc[[23, 42, 46, 93, 94]].itertuples():
+ print(basin.owmnaam)
+ boundary_nodes = network.nodes[
+ network.nodes.distance(basin.geometry.boundary) < 0.01
+    ].copy()
+ us_ds = [
+ network.get_upstream_downstream(i, "basin_id") for i in boundary_nodes.index
+ ]
+ boundary_nodes["upstream"] = [i[0] for i in us_ds]
+ boundary_nodes["downstream"] = [i[1] for i in us_ds]
+ boundary_nodes = boundary_nodes.dropna(subset=["upstream", "downstream"], how="any")
+ boundary_nodes.loc[:, ["count"]] = [
+ len(network.upstream_nodes(i)) + len(network.downstream_nodes(i))
+ for i in boundary_nodes.index
+ ]
+
+ boundary_nodes.sort_values(by="count", ascending=False, inplace=True)
+ boundary_nodes.loc[:, ["sum"]] = (
+ boundary_nodes["upstream"] + boundary_nodes["downstream"]
+ )
+ boundary_nodes.drop_duplicates("sum", inplace=True)
+ connected = basin_admin[basin.Index]["neighbors"]
+ boundary_nodes = boundary_nodes[~boundary_nodes["upstream"].isin(connected)]
+ boundary_nodes = boundary_nodes[~boundary_nodes["downstream"].isin(connected)]
+
+ for node in boundary_nodes.itertuples():
+ neighbor = next(
+ i for i in [node.upstream, node.downstream] if not i == basin.Index
+ )
+ if basin.Index not in basin_admin[neighbor]["neighbors"]:
+ ds_node = node.upstream == basin.Index
+ node_type = "ManningResistance"
+
+            # if the node lies downstream of this basin and the basin has a
+            # rating curve, split the flow with a FractionalFlow node
+ if ds_node and ("rating_curve" in basin_admin[basin.Index].keys()):
+ node_type = "FractionalFlow"
+            # likewise if the node lies upstream and the neighboring basin has
+            # a rating curve
+ elif (not ds_node) and ("rating_curve" in basin_admin[neighbor].keys()):
+ node_type = "FractionalFlow"
+
+ if node_type == "FractionalFlow":
+ code = verdeelsleutel_gdf.at[
+ verdeelsleutel_gdf.distance(
+ network.nodes.at[node.Index, "geometry"]
+ )
+ .sort_values()
+ .index[0],
+ "fractie",
+ ]
+ else:
+ code = None
+
+ node_list += [
+ {
+ "type": node_type,
+ "is_structure": False,
+ "node_id": node.Index,
+ "code_waterbeheerder": code,
+ "geometry": network.nodes.at[node.Index, "geometry"],
+ }
+ ]
+ if node.upstream != basin.Index:
+ basin_admin[basin.Index]["nodes_from"] += [node.Index]
+ basin_admin[basin.Index]["neighbors"] += [node.upstream]
+ elif node.downstream != basin.Index:
+ basin_admin[basin.Index]["nodes_to"] += [node.Index]
+ basin_admin[basin.Index]["neighbors"] += [node.downstream]
+ else:
+ raise Exception(f"iets gaat fout bij {basin.Index}")
+
+ nodes_from = basin_admin[basin.Index]["nodes_from"]
+ nodes_to = basin_admin[basin.Index]["nodes_to"]
+
+ if (len(nodes_from) > 0) and (len(nodes_to) > 0):
+ gdf = network.subset_nodes(
+ nodes_from,
+ nodes_to,
+ duplicated_nodes=True,
+ directed=True,
+ inclusive=False,
+ )
+ if gdf.empty:
+ gdf = network.subset_nodes(
+ nodes_from,
+ nodes_to,
+ duplicated_nodes=True,
+ directed=False,
+ inclusive=False,
+ )
+    else:  # no inflow or outflow nodes found; fall back to all nodes in the basin
+        gdf = network.nodes[network.nodes.basin_id == basin.Index]
+
+ gdf = gdf[gdf["basin_id"] == basin.Index]
+
+ basin_node_id = gdf.distance(basin.geometry.centroid).sort_values().index[0]
+ basin_admin[basin.Index]["node_id"] = basin_node_id
+ node_list += [
+ {
+ "type": "Basin",
+ "is_structure": False,
+ "code_waterbeheerder": basin.owmident,
+ "waterbeheerder": "Rijkswaterstaat",
+ "name": basin.owmnaam,
+ "node_id": basin_node_id,
+ "basin_id": basin.Index,
+ "geometry": network.nodes.at[basin_node_id, "geometry"],
+ }
+ ]
+
+ # add rating-curve and extra edge
+ if "rating_curve" in basin_admin[basin.Index].keys():
+ rc_node_id = next(
+ i for i in network.downstream_nodes(basin_node_id) if i in gdf.index
+ )
+ node_list += [
+ {
+ "type": "TabulatedRatingCurve",
+ "is_structure": False,
+ "waterbeheerder": "Rijkswaterstaat",
+ "code_waterbeheerder": basin_admin[basin.Index]["rating_curve"],
+ "name": basin_admin[basin.Index]["rating_curve"],
+ "node_id": rc_node_id,
+ "basin_id": basin.Index,
+ "geometry": network.nodes.at[rc_node_id, "geometry"],
+ }
+ ]
+ edge_list += [
+ {
+ "from_node_id": basin_node_id,
+ "to_node_id": rc_node_id,
+ "name": basin.owmnaam,
+ "krw_id": basin.owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(basin_node_id, rc_node_id, directed=True),
+ }
+ ]
+ from_id = rc_node_id
+ else:
+ from_id = basin_node_id
+
+ for node_from in nodes_from:
+ edge_list += [
+ {
+ "from_node_id": node_from,
+ "to_node_id": basin_node_id,
+ "name": basin.owmnaam,
+ "krw_id": basin.owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(node_from, basin_node_id, directed=False),
+ }
+ ]
+
+ for node_to in nodes_to:
+ edge_list += [
+ {
+ "from_node_id": from_id,
+ "to_node_id": node_to,
+ "name": basin.owmnaam,
+ "krw_id": basin.owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(from_id, node_to, directed=False),
+ }
+ ]
+# %% finish boundaries
+for boundary in boundary_gdf[~boundary_gdf.index.isin(boundaries_passed)].itertuples():
+ if boundary.type == "FlowBoundary":
+ basin_id = network.find_downstream(boundary.node_id, "basin_id")
+ value = boundary.flow_rate
+ edge_list += [
+ {
+ "from_node_id": boundary.node_id,
+ "to_node_id": basin_admin[basin_id]["node_id"],
+ "name": basin_poly_gdf.loc[basin_id].owmnaam,
+ "krw_id": basin_poly_gdf.loc[basin_id].owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(
+ boundary.node_id, basin_admin[basin_id]["node_id"], directed=True
+ ),
+ }
+ ]
+ basin_admin[basin_id]["nodes_from"] += [boundary.node_id]
+ else:
+ basin_id = network.find_upstream(boundary.node_id, "basin_id")
+ basin_node_id = basin_admin[basin_id]["node_id"]
+
+ gdf = network.nodes[network.nodes["basin_id"] == basin_id]
+ manning_node_id = (
+ gdf.distance(
+ network.get_line(basin_node_id, boundary.node_id).interpolate(
+ 0.5, normalized=True
+ )
+ )
+ .sort_values()
+ .index[0]
+ ) # get manning node, closest to half-way on the line between basin and boundary
+
+ value = boundary.level
+ node_list += [
+ {
+ "type": "ManningResistance",
+ "is_structure": False,
+ "node_id": manning_node_id,
+ "geometry": network.nodes.at[manning_node_id, "geometry"],
+ }
+ ]
+ edge_list += [
+ {
+ "from_node_id": basin_node_id,
+ "to_node_id": manning_node_id,
+ "name": basin_poly_gdf.loc[basin_id].owmnaam,
+ "krw_id": basin_poly_gdf.loc[basin_id].owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(
+ basin_node_id, manning_node_id, directed=True
+ ),
+ },
+ {
+ "from_node_id": manning_node_id,
+ "to_node_id": boundary.node_id,
+ "name": basin_poly_gdf.loc[basin_id].owmnaam,
+ "krw_id": basin_poly_gdf.loc[basin_id].owmident,
+ "edge_type": "flow",
+ "geometry": network.get_line(
+ manning_node_id, boundary.node_id, directed=True
+ ),
+ },
+ ]
+ basin_admin[basin_id]["nodes_to"] += [boundary.node_id]
+
+ boundaries_passed += [boundary.Index]
+ node_list += [
+ {
+ "type": boundary.type,
+ "value": value,
+ "is_structure": False,
+ "code_waterbeheerder": boundary.code,
+ "waterbeheerder": "Rijkswaterstaat",
+ "name": boundary.naam,
+ "node_id": boundary.node_id,
+ "geometry": network.nodes.at[boundary.node_id, "geometry"],
+ }
+ ]
+
+# %%
+gpd.GeoDataFrame(node_list, crs=28992).to_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "ribasim_intermediate.gpkg"),
+ layer="Node",
+)
+gpd.GeoDataFrame(edge_list, crs=28992).to_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "ribasim_intermediate.gpkg"),
+ layer="Edge",
+)
+
+# %% define Network
+node_df = gpd.GeoDataFrame(node_list, crs=28992).drop_duplicates()
+node = ribasim.Node(df=node_df)
+node.df.set_index("meta_node_id", drop=False, inplace=True)
+node.df.index.name = "fid"
+edge_df = gpd.GeoDataFrame(edge_list, crs=28992)
+edge = ribasim.Edge(df=edge_df)
+network = ribasim.Network(node=node, edge=edge)
+
+# %% define Basin
+static_df = node_df[node_df["type"] == "Basin"][["node_id", "basin_id"]].set_index(
+ "basin_id"
+)
+level_area_df.drop_duplicates(["level", "area", "id"], inplace=True)
+profile_df = level_area_df[level_area_df["id"].isin(static_df.index)].copy()
+profile_df["node_id"] = profile_df["id"].apply(lambda x: static_df.at[x, "node_id"])
+profile_df = profile_df[["node_id", "area", "level"]]
+
+static_df["precipitation"] = PRECIPITATION
+static_df["potential_evaporation"] = EVAPORATION
+static_df["drainage"] = 0
+static_df["infiltration"] = 0
+static_df["urban_runoff"] = 0
+
+state_df = profile_df.groupby("node_id").min()["level"].reset_index()
+state_df.loc[:, ["level"]] = state_df["level"].apply(lambda x: max(x + 1, 0))
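+# initial level: 1 m above the lowest profile level, floored at 0 (assumed m NAP)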
+
+basin = ribasim.Basin(profile=profile_df, static=static_df, state=state_df)
+
+# % define Resistance
+# node_df.loc[node_df["type"] == "Outlet", ["type", "value"]] = (
+# "LinearResistance",
+# 1000,
+# )  # FIXME: include the Nijkerkersluis with its proper node type
+
+resistance_df = node_df[node_df["type"] == "LinearResistance"][["node_id", "value"]]
+resistance_df.rename(columns={"value": "resistance"}, inplace=True)
+linear_resistance = ribasim.LinearResistance(static=resistance_df)
+
+# % define Pump
+pump_df = node_df[node_df["type"] == "Pump"][["node_id", "value"]]
+pump_df.rename(columns={"value": "flow_rate"}, inplace=True)
+pump = ribasim.Pump(static=pump_df)
+
+
+# % define Outlet
+outlet_df = node_df[node_df["type"] == "Outlet"][["node_id", "value"]]
+outlet_df.rename(columns={"value": "flow_rate"}, inplace=True)
+outlet = ribasim.Outlet(static=outlet_df)
+
+# % define fraction
+node_index = node_df[node_df["type"] == "FractionalFlow"][
+ ["code_waterbeheerder", "node_id"]
+].set_index("code_waterbeheerder")["node_id"]
+
+fractional_flow_df = verdeelsleutel_to_fractions(verdeelsleutel_df, node_index)
+fractional_flow = ribasim.FractionalFlow(static=fractional_flow_df)
+
+# % define TabulatedRatingCurve
+node_index = node_df[node_df["type"] == "TabulatedRatingCurve"][
+ ["code_waterbeheerder", "node_id"]
+].set_index("code_waterbeheerder")["node_id"]
+tabulated_rating_curve_df = read_rating_curve(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "rating_curves.xlsx"),
+ node_index,
+)
+tabulated_rating_curve = ribasim.TabulatedRatingCurve(static=tabulated_rating_curve_df)
+
+# % define Manning
+manning_df = node_df[node_df["type"] == "ManningResistance"][["node_id"]]
+manning_df["length"] = 10000
+manning_df["manning_n"] = 0.04
+manning_df["profile_width"] = 10000
+manning_df["profile_slope"] = 1
+
+manning_resistance = ribasim.ManningResistance(static=manning_df)
+
+# % define FlowBoundary
+flow_boundary_df = node_df[node_df["type"] == "FlowBoundary"][["node_id", "value"]]
+flow_boundary_df.rename(columns={"value": "flow_rate"}, inplace=True)
+flow_boundary = ribasim.FlowBoundary(static=flow_boundary_df)
+
+# % define LevelBoundary
+level_boundary_df = node_df[node_df["type"] == "LevelBoundary"][["node_id", "value"]]
+level_boundary_df.rename(columns={"value": "level"}, inplace=True)
+level_boundary = ribasim.LevelBoundary(static=level_boundary_df)
+
+
+# % write model
+model = ribasim.Model(
+ network=network,
+ basin=basin,
+ flow_boundary=flow_boundary,
+ level_boundary=level_boundary,
+ linear_resistance=linear_resistance,
+ manning_resistance=manning_resistance,
+ tabulated_rating_curve=tabulated_rating_curve,
+ fractional_flow=fractional_flow,
+ pump=pump,
+ outlet=outlet,
+ # terminal=terminal,
+ starttime="2020-01-01 00:00:00",
+ endtime="2021-01-01 00:00:00",
+)
+
+# %%
+model = reset_index(model)
+
+# %% deactivate Nijkerkersluis
+
+nijkerk_idx = model.network.node.df[
+ model.network.node.df["meta_code_waterbeheerder"] == "32E-001-04"
+].index
+
+# model.network.node.df = model.network.node.df[
+# ~(model.network.node.df["meta_node_id"].isin(nijkerk_idx))
+# ]
+
+# model.network.edge.df = model.network.edge.df[
+# ~(model.network.edge.df["from_node_id"].isin(nijkerk_idx))
+# ]
+
+# model.network.edge.df = model.network.edge.df[
+# ~(model.network.edge.df["to_node_id"].isin(nijkerk_idx))
+# ]
+
+# model.linear_resistance.static.df = model.linear_resistance.static.df[
+# ~model.linear_resistance.static.df["node_id"].isin(nijkerk_idx)
+# ]
+
+# model = reset_index(model)
+
+model.linear_resistance.static.df.loc[
+ model.linear_resistance.static.df["node_id"].isin(nijkerk_idx), ["active"]
+] = False
+
+model.linear_resistance.static.df.loc[
+ ~model.linear_resistance.static.df["node_id"].isin(nijkerk_idx), ["active"]
+] = True
+
+
+# %% solver settings
+model.solver.algorithm = "RK4"
+model.solver.adaptive = False
+model.solver.dt = 10.0
+model.solver.saveat = 360
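+# fixed 10 s RK4 steps; saveat=360 writes output every 360 s (assuming seconds, like dt)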
+
+# %%
+print("write ribasim model")
+ribasim_toml = cloud.joinpath("Rijkswaterstaat", "modellen", "hws_network", "hws.toml")
+model.write(ribasim_toml)
+
+# %%
diff --git a/notebooks/rijkswaterstaat/5_add_control.py b/notebooks/rijkswaterstaat/5_add_control.py
new file mode 100644
index 0000000..37feaa9
--- /dev/null
+++ b/notebooks/rijkswaterstaat/5_add_control.py
@@ -0,0 +1,178 @@
+# %%
+from datetime import datetime
+
+import pandas as pd
+import ribasim
+from ribasim_nl import CloudStorage, discrete_control
+from ribasim_nl.model import add_control_node_to_network, update_table
+from ribasim_nl.verdeelsleutels import read_verdeelsleutel, verdeelsleutel_to_control
+
+cloud = CloudStorage()
+waterbeheerder = "Rijkswaterstaat"
+
+ribasim_toml = cloud.joinpath("Rijkswaterstaat", "modellen", "hws_network", "hws.toml")
+model = ribasim.Model.read(ribasim_toml)
+
+verdeelsleutel_df = read_verdeelsleutel(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "verdeelsleutel_driel.xlsx")
+)
+
+# %% start adding
+# % add Driel
+name = "Driel"
+code_waterbeheerder = "40A-004-02"
+
+offset_node_id = model.network.node.df[
+ model.network.node.df["meta_code_waterbeheerder"] == code_waterbeheerder
+].index[0]
+model = verdeelsleutel_to_control(
+ verdeelsleutel_df,
+ model,
+ name=name,
+ code_waterbeheerder=code_waterbeheerder,
+ offset_node_id=offset_node_id,
+ waterbeheerder="Rijkswaterstaat",
+)
+
+# % add Haringvliet
+name = "Haringvlietsluizen"
+code_waterbeheerder = "37C-350-09"
+flow_rate_haringvliet = [0, 0, 50, 50, 400, 2500, 3800, 5200, 6800, 8000, 9000]
+flow_rate_lobith = [0, 1100, 1100, 1700, 2000, 4000, 6000, 8000, 10000, 12000, 15000]
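+# lookup table: target discharge through the Haringvlietsluizen (m3/s) as a
+# step function of the discharge at Lobith (m3/s)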
+
+node_ids = model.network.node.df[
+ model.network.node.df["meta_code_waterbeheerder"] == code_waterbeheerder
+].index
+
+
+ctrl_node_id = add_control_node_to_network(
+ model.network,
+ node_ids,
+ ctrl_node_geom=(62430, 427430),
+ meta_waterbeheerder="Rijkswaterstaat",
+ name=name,
+ meta_code_waterbeheerder=name,
+)
+
+listen_feature_id = model.network.node.df[
+ model.network.node.df["meta_code_waterbeheerder"] == "LOBH"
+].index[0]
+
+condition_df = discrete_control.condition(
+ values=flow_rate_lobith,
+ node_id=ctrl_node_id,
+ listen_feature_id=listen_feature_id,
+ name=name,
+)
+
+model.discrete_control.condition.df = update_table(
+ model.discrete_control.condition.df, condition_df
+)
+
+logic_df = discrete_control.logic(
+ node_id=ctrl_node_id,
+ length=len(flow_rate_haringvliet),
+ name=name,
+)
+
+model.discrete_control.logic.df = update_table(
+ model.discrete_control.logic.df, ribasim.DiscreteControl(logic=logic_df).logic.df
+)
+
+outlet_df = discrete_control.node_table(
+ values=flow_rate_lobith, variable="flow_rate", name=name, node_id=node_ids[0]
+)
+
+model.outlet.static.df = update_table(
+ model.outlet.static.df, ribasim.Outlet(static=outlet_df).static.df
+)
+
+# % add IJsselmeer via Waddenzee water level
+node_ids = model.level_boundary.static.df[
+ model.level_boundary.static.df["node_id"].isin([2, 4])
+]["node_id"].to_numpy()
+
+time = pd.date_range(model.starttime, model.endtime)
+
+day_of_year = [
+ "01-01",
+ "03-01",
+ "03-11",
+ "03-21",
+ "04-01",
+ "04-10",
+ "04-15",
+ "08-11",
+ "08-21",
+ "08-31",
+ "09-11",
+ "09-15",
+ "09-21",
+ "10-01",
+ "10-11",
+ "10-21",
+ "10-31",
+ "12-31",
+]
+
+level = [
+ -0.4,
+ -0.2,
+ -0.1,
+ -0.1,
+ -0.15,
+ -0.15,
+ -0.15,
+ -0.15,
+ -0.2,
+ -0.25,
+ -0.28,
+ -0.3,
+ -0.32,
+ -0.35,
+ -0.4,
+ -0.4,
+ -0.4,
+ -0.4,
+]
+level_cycle_df = pd.DataFrame(
+ {
+ "dayofyear": [
+ datetime.strptime(i, "%m-%d").timetuple().tm_yday for i in day_of_year
+ ],
+ "level": level,
+ }
+).set_index("dayofyear")
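+# seasonal target-level cycle (m, assumed NAP) for the IJsselmeer boundaries,
+# indexed by day of year; get_level below picks the latest entry at or before
+# a given day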
+
+
+def get_level(timestamp, level_cycle_df):
+ return level_cycle_df.at[
+ level_cycle_df.index[level_cycle_df.index <= timestamp.dayofyear].max(), "level"
+ ]
+
+
+# %%
+time = pd.date_range(model.starttime, model.endtime)
+level_df = pd.DataFrame(
+ {"time": time, "level": [get_level(i, level_cycle_df) for i in time]}
+)
+
+level_df = pd.concat(
+ [
+ pd.concat(
+ [level_df, pd.DataFrame({"node_id": [node_id] * len(level_df)})], axis=1
+ )
+ for node_id in node_ids
+ ],
+ ignore_index=True,
+)
+model.level_boundary.time.df = level_df
+model.level_boundary.static.df = model.level_boundary.static.df[
+ ~model.level_boundary.static.df.node_id.isin(node_ids)
+]
+
+# %% write model
+ribasim_toml = cloud.joinpath("Rijkswaterstaat", "modellen", "hws", "hws.toml")
+model.write(ribasim_toml)
+
+# %%
diff --git a/notebooks/rijkswaterstaat/6_netwerk.py b/notebooks/rijkswaterstaat/6_netwerk.py
new file mode 100644
index 0000000..62bc6ca
--- /dev/null
+++ b/notebooks/rijkswaterstaat/6_netwerk.py
@@ -0,0 +1,243 @@
+# %%
+import geopandas as gpd
+import numpy as np
+import pandas as pd
+from ribasim_nl import CloudStorage, Network
+from shapely.geometry import LineString, Point, Polygon
+from shapely.ops import snap, split
+
+cloud = CloudStorage()
+
+# %% read files
+
+print("read basins")
+krw_basins_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_vlakken.gpkg"),
+ engine="pyogrio",
+)
+
+print("read rijkspolygonen")
+rws_opp_poly_gdf = gpd.read_file(
+ cloud.joinpath(
+ "Rijkswaterstaat", "aangeleverd", "oppervlaktewaterlichamen_rijk.gpkg"
+ )
+)
+
+print("read osm fairway")
+fairway_osm_gdf = gpd.read_file(
+ cloud.joinpath("basisgegevens", "OSM", "waterway_fairway_the_netherlands.gpkg"),
+ engine="pyogrio",
+)
+
+print("read osm river")
+river_osm_gdf = gpd.read_file(
+ cloud.joinpath("basisgegevens", "OSM", "waterway_river_the_netherlands.gpkg"),
+ engine="pyogrio",
+)
+
+print("read osm canals")
+canal_osm_gdf = gpd.read_file(
+ cloud.joinpath("basisgegevens", "OSM", "waterway_canal_the_netherlands.gpkg"),
+ engine="pyogrio",
+)
+
+print("read extra lijnen")
+extra_lines_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "model_user_data.gpkg"),
+ layer="extra_netwerk_lijnen_2",
+ engine="pyogrio",
+)
+
+print("read verwijder lijnen")
+remove_lines_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "model_user_data.gpkg"),
+ layer="verwijder_lijn_2",
+ engine="pyogrio",
+)
+
+print("read toevoegen knoop")
+add_nodes_gdf = gpd.read_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "model_user_data.gpkg"),
+ layer="toevoegen knoop",
+ engine="pyogrio",
+)
+
+
+# %% Create fairway (vaarwegen) and OSM basin filters and masks
+
+ijsselmeer_basins = [
+ "NL92_MARKERMEER",
+ "NL92_IJSSELMEER",
+]
+# the KRW polygons do not fully cover some structures, so these rijks_waterlichamen are added
+rijks_waterlichamen = [
+ "Maximakanaal",
+ "Kanaal Wessem-Nederweert",
+ "Noordzeekanaal",
+ "Buiten-IJ",
+ "Buitenhaven van IJmuiden",
+]
+
+exclude_osm_basins = ijsselmeer_basins
+
+print("create osm mask polygon")
+filtered_osm_basins_gdf = krw_basins_gdf[
+ ~krw_basins_gdf["owmident"].isin(exclude_osm_basins)
+]
+
+osm_basins_mask = (
+ filtered_osm_basins_gdf.explode(index_parts=False)
+ .geometry.exterior.apply(Polygon)
+ .unary_union
+)
+
+rws_opp_poly_mask = rws_opp_poly_gdf[
+ rws_opp_poly_gdf.waterlichaam.isin(rijks_waterlichamen)
+].unary_union
+
+osm_mask = osm_basins_mask.union(rws_opp_poly_mask)
+rws_opp_poly_mask_gdf = gpd.GeoDataFrame(geometry=[rws_opp_poly_mask])
+osm_basins_mask_gdf = gpd.GeoDataFrame(geometry=[osm_basins_mask])
+
+# Create a GeoDataFrame from the union result
+osm_mask_gdf = gpd.GeoDataFrame(geometry=[osm_mask])
+
+
+# %% Overlay lines with krw-basins
+
+print("extra lines basin overlay")
+extra_basin_gdf = gpd.overlay(extra_lines_gdf, krw_basins_gdf, how="union")
+
+print("river osm basin overlay")
+river_osm_gdf = river_osm_gdf[river_osm_gdf.intersects(osm_mask)]
+river_osm_basin_gdf = gpd.overlay(river_osm_gdf, filtered_osm_basins_gdf, how="union")
+
+print("canal osm basin overlay")
+canal_osm_gdf = canal_osm_gdf[canal_osm_gdf.intersects(osm_mask)]
+canal_osm_basin_gdf = gpd.overlay(canal_osm_gdf, filtered_osm_basins_gdf, how="union")
+
+print("canal osm fairway overlay")
+fairway_osm_gdf = fairway_osm_gdf[fairway_osm_gdf.intersects(osm_mask)]
+fairway_osm_basin_gdf = gpd.overlay(
+ fairway_osm_gdf, filtered_osm_basins_gdf, how="union"
+)
+
+
+# %% Merge into a single line dataset
+river_osm_basin_gdf.rename(columns={"osm_id": "id"}, inplace=True)
+canal_osm_basin_gdf.rename(columns={"osm_id": "id"}, inplace=True)
+fairway_osm_basin_gdf.rename(columns={"osm_id": "id"}, inplace=True)
+extra_basin_gdf.rename(columns={"naam": "name"}, inplace=True)
+# Concatenate GeoDataFrames
+print("concat")
+network_lines_gdf = pd.concat(
+ [
+ river_osm_basin_gdf,
+ canal_osm_basin_gdf,
+ fairway_osm_basin_gdf,
+ ],
+ ignore_index=True,
+)
+
+remove_indices = []
+for geometry in remove_lines_gdf.geometry:
+ remove_indices += network_lines_gdf.loc[
+ network_lines_gdf.geometry.within(geometry.buffer(0.1))
+ ].index.to_list()
+network_lines_gdf = network_lines_gdf[~network_lines_gdf.index.isin(remove_indices)]
+
+data = []
+for geometry in add_nodes_gdf.geometry:
+ lines_select_gdf = network_lines_gdf[
+ network_lines_gdf.geometry.buffer(0.01).intersects(geometry.buffer(0.01))
+ ]
+ for idx, row in lines_select_gdf.iterrows():
+ feature = row.to_dict()
+ us_geometry, ds_geometry = split(
+ snap(row.geometry, geometry, 0.01), geometry
+ ).geoms
+ network_lines_gdf.loc[idx, "geometry"] = us_geometry
+ feature["geometry"] = ds_geometry
+ data += [feature]
+
+network_lines_gdf = pd.concat(
+ [
+ network_lines_gdf,
+ extra_basin_gdf,
+ gpd.GeoDataFrame(data, crs=network_lines_gdf.crs),
+ ],
+ ignore_index=True,
+)
+
+# %% write as network
+
+
+def subdivide_line(line, max_length):
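+    """Split a line into segments no longer than max_length, keeping the original vertices."""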
+ total_length = line.length
+
+ num_segments = int(np.ceil(total_length / max_length))
+
+ if num_segments == 1:
+ return [line]
+
+ segments = []
+ for i in range(num_segments):
+ start_frac = i / num_segments
+ end_frac = (i + 1) / num_segments
+ start_point = line.interpolate(start_frac, normalized=True)
+ start_dist = line.project(start_point)
+ end_point = line.interpolate(end_frac, normalized=True)
+ end_dist = line.project(end_point)
+
+ points = (
+ [start_point]
+ + [
+ Point(i)
+ for i in line.coords
+ if (line.project(Point(i)) > start_dist)
+ and (line.project(Point(i)) < end_dist)
+ ]
+ + [end_point]
+ )
+ segment = LineString(points)
+ segments.append(segment)
+
+ return segments
+
+
+def subdivide_geodataframe(gdf, max_length):
+ data = []
+
+    for row in gdf.explode(index_parts=False).itertuples():
+ row_dict = row._asdict()
+ row_dict.pop("geometry")
+ lines = subdivide_line(row.geometry, max_length)
+ data += [{**row_dict, "geometry": line} for line in lines]
+
+ return gpd.GeoDataFrame(data=data, crs=gdf.crs)
+
+
+# brute-force removal of the Geul and Derde Diem because of the siphon under
+# the Julianakanaal
+network_lines_gdf = network_lines_gdf[
+    ~network_lines_gdf["name"].isin(["Geul", "Derde Diem"])
+]
+
+network = Network(network_lines_gdf, tolerance=1, id_col="id", name_col="name")
+
+print("write to hydamo")
+lines = network.links
+lines.rename(columns={"name": "naam"}, inplace=True)
+lines.to_file(
+ cloud.joinpath("Rijkswaterstaat", "verwerkt", "hydamo.gpkg"),
+ layer="hydroobject",
+ engine="pyogrio",
+)
+
+
+gdf_subdivided = subdivide_geodataframe(network_lines_gdf, max_length=450)
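+# subdividing long lines gives the Network candidate nodes roughly every 450 m,
+# presumably so structures and boundaries can snap onto a nearby graph node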
+
+# %%
+network = Network(gdf_subdivided, tolerance=1, id_col="id", name_col="name")
+
+print("write network")
+network.to_file(cloud.joinpath("Rijkswaterstaat", "verwerkt", "netwerk.gpkg"))
diff --git a/notebooks/rijkswaterstaat/7_upload_hws_model.py b/notebooks/rijkswaterstaat/7_upload_hws_model.py
new file mode 100644
index 0000000..93ff8dc
--- /dev/null
+++ b/notebooks/rijkswaterstaat/7_upload_hws_model.py
@@ -0,0 +1,7 @@
+# %%
+from ribasim_nl import CloudStorage
+
+cloud = CloudStorage()
+
+cloud.upload_model("Rijkswaterstaat", "hws")
+# %%
diff --git a/notebooks/rijkswaterstaat/netwerk.py b/notebooks/rijkswaterstaat/netwerk.py
deleted file mode 100644
index 7f717db..0000000
--- a/notebooks/rijkswaterstaat/netwerk.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# %%
-import geopandas as gpd
-from ribasim_nl import CloudStorage
-from ribasim_nl.geodataframe import direct_basins, join_by_poly_overlay, split_basins
-
-cloud = CloudStorage()
-
-# %% Prepare RWS krw_basin_polygons
-
-krw_poly_gdf = gpd.read_file(
- cloud.joinpath(
- "Basisgegevens", "KRW", "krw_oppervlaktewaterlichamen_nederland_vlakken.gpkg"
- )
-)
-
-krw_split_lines_gdf = gpd.read_file(
- cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_split_lijnen.gpkg")
-)
-
-rws_opp_poly_gdf = gpd.read_file(
- cloud.joinpath(
- "Rijkswaterstaat", "aangeleverd", "oppervlaktewaterlichamen_rijk.gpkg"
- )
-)
-
-
-rws_krw_poly_gdf = split_basins(krw_poly_gdf, krw_split_lines_gdf)
-
-rws_krw_poly_gdf = join_by_poly_overlay(
- rws_krw_poly_gdf,
- rws_opp_poly_gdf[["waterlichaam", "geometry"]],
- select_by="poly_area",
-)
-
-
-rws_krw_poly = cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_vlakken.gpkg")
-
-rws_krw_poly_gdf.to_file(rws_krw_poly)
-
-
-# %% create overlay with krw_lines and polygons
-krw_line_gdf = gpd.read_file(
- cloud.joinpath(
- "Basisgegevens", "KRW", "krw_oppervlaktewaterlichamen_nederland_lijnen.gpkg"
- )
-)
-
-rws_krw_lines = cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_lijnen.gpkg")
-
-rws_krw_line_gdf = join_by_poly_overlay(
- gpd.GeoDataFrame(krw_line_gdf.explode()["geometry"]), rws_krw_poly_gdf
-)
-
-rws_krw_line_gdf.to_file(rws_krw_lines)
-
-# %% direct basins
-
-basin_ident = "owmident"
-link_ident = "Name"
-
-basins_gdf = gpd.read_file(
- cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_vlakken.gpkg")
-)
-
-network_gdf = gpd.read_file(
- cloud.joinpath("Basisgegevens", "lsm3-j18_5v6", "shapes", "network_Branches.shp")
-)
-network_gdf.set_crs(28992, inplace=True)
-drop_duplicates = True
-
-poly_directions_gdf = direct_basins(basins_gdf, network_gdf, basin_ident, link_ident)
-
-
-poly_directions_gdf.to_file(
- cloud.joinpath("Rijkswaterstaat", "verwerkt", "krw_basins_verbindingen.gpkg")
-)
-
-# %% snap nodes
-
-# %% build graph
-
-# %% build_network
-
-# %% A(h)
diff --git a/notebooks/samenvoegen_modellen.py b/notebooks/samenvoegen_modellen.py
new file mode 100644
index 0000000..42607eb
--- /dev/null
+++ b/notebooks/samenvoegen_modellen.py
@@ -0,0 +1,218 @@
+# %%
+import sqlite3
+from datetime import datetime
+
+import pandas as pd
+import ribasim
+from ribasim_nl import CloudStorage
+from ribasim_nl.concat import concat
+
+# %%
+cloud = CloudStorage()
+readme = f"""# Model voor het Landelijk Hydrologisch Model
+
+Gegenereerd: {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}
+Ribasim-Python versie: {ribasim.__version__}
+Getest (u kunt simuleren): Nee
+
+**Samengevoegde modellen (beheerder: modelnaam (versie))**
+"""
+
+
+def fix_rating_curve(database_path):
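+    """Rename the legacy 'discharge' column to 'flow_rate' in the rating-curve tables."""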
+ # Connect to the SQLite database
+ conn = sqlite3.connect(database_path)
+
+ tables = ["TabulatedRatingCurve / static", "TabulatedRatingCurve / time"]
+
+ for table in tables:
+ # Read the table into a pandas DataFrame
+ try:
+ df = pd.read_sql_query(f"SELECT * FROM '{table}'", conn)
+        except Exception:  # table does not exist in this model
+ continue
+
+ # Rename the column in the DataFrame
+ df_renamed = df.rename(columns={"discharge": "flow_rate"})
+
+ # Write the DataFrame back to the SQLite table
+ df_renamed.to_sql(table, conn, if_exists="replace", index=False)
+
+ # Close the connection
+ conn.close()
+
+
+def add_basin_state(toml):
+ # load a model without Basin / state
+ model = ribasim.Model(filepath=toml)
+ basin = model.basin
+
+ # set initial level to (for example) 1 meter above Basin bottom
+    basin.state.df = pd.DataFrame(
+        basin.profile.df.groupby("node_id")["level"].min() + 1.0
+    ).reset_index()
+
+ # remove geopackage key in model if exists
+ if hasattr(model, "geopackage"):
+ model.geopackage = None
+
+ # write it back
+ model.write(toml)
+
+
+models = [
+ {
+ "authority": "Rijkswaterstaat",
+ "model": "hws",
+ "find_toml": False,
+ "update": False,
+ "zoom_level": 0,
+ },
+ {
+ "authority": "AmstelGooienVecht",
+ "model": "AmstelGooienVecht_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "Delfland",
+ "model": "Delfland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "HollandseDelta",
+ "model": "HollandseDelta_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "HollandsNoorderkwartier",
+ "model": "HollandsNoorderkwartier_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "Rijnland",
+ "model": "Rijnland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "Rivierenland",
+ "model": "Rivierenland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "Scheldestromen",
+ "model": "Scheldestromen_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "SchielandendeKrimpenerwaard",
+ "model": "SchielandendeKrimpenerwaard_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "WetterskipFryslan",
+ "model": "WetterskipFryslan_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+ {
+ "authority": "Zuiderzeeland",
+ "model": "Zuiderzeeland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ },
+]
+
+for idx, model in enumerate(models):
+ print(f"{model["authority"]} - {model["model"]}")
+ model_versions = [
+ i
+ for i in cloud.uploaded_models(model["authority"])
+ if i.model == model["model"]
+ ]
+ if model_versions:
+ model_version = sorted(model_versions, key=lambda x: x.version)[-1]
+ else:
+ raise ValueError(f"No models with name {model["model"]} in the cloud")
+
+ model_path = cloud.joinpath(
+ model["authority"], "modellen", model_version.path_string
+ )
+
+ # download model if not yet downloaded
+ if not model_path.exists():
+ print(f"Downloaden versie: {model_version.version}")
+ url = cloud.joinurl(model["authority"], "modellen", model_version.path_string)
+ cloud.download_content(url)
+
+ # find toml
+ if model["find_toml"]:
+ tomls = list(model_path.glob("*.toml"))
+        if len(tomls) > 1:
+            raise ValueError(
+                f"User provided more than one toml-file: {len(tomls)}, remove one!"
+            )
+        elif not tomls:
+            raise FileNotFoundError(f"no toml-file found in {model_path}")
+        else:
+            model_path = tomls[0]
+ else:
+ model_path = model_path.joinpath(f"{model["model"]}.toml")
+
+ # update model to v0.7.0
+ if model["update"] and (not model_path.parent.joinpath("updated").exists()):
+ print("updating model")
+ # rename db_file if incorrect
+        db_file = model_path.parent.joinpath(f"{model_path.stem}.gpkg")
+ if db_file.exists():
+ db_file = db_file.rename(model_path.parent.joinpath("database.gpkg"))
+ else:
+ db_file = model_path.parent.joinpath("database.gpkg")
+ if not db_file.exists():
+ raise FileNotFoundError(f"{db_file} doesn't exist")
+ # fix rating_curve
+ fix_rating_curve(db_file)
+
+ # add basin-state
+ add_basin_state(model_path)
+
+ model_path.parent.joinpath("updated").write_text("true")
+
+ # read model
+ ribasim_model = ribasim.Model.read(model_path)
+ ribasim_model.network.node.df.loc[:, "meta_zoom_level"] = model["zoom_level"]
+ ribasim_model.network.edge.df.loc[:, "meta_zoom_level"] = model["zoom_level"]
+ if idx == 0:
+ lhm_model = ribasim_model
+ else:
+ cols = [i for i in lhm_model.network.edge.df.columns if i != "meta_index"]
+ lhm_model.network.edge.df = lhm_model.network.edge.df[cols]
+ ribasim_model.network.node.df.loc[:, "meta_waterbeheerder"] = model["authority"]
+ ribasim_model.network.edge.df.loc[:, "meta_waterbeheerder"] = model["authority"]
+ lhm_model = concat([lhm_model, ribasim_model])
+
+    readme += f"""
+**{model['authority']}**: {model['model']} ({model_version.version})"""
+
+# %%
+print("write lhm model")
+ribasim_toml = cloud.joinpath("Rijkswaterstaat", "modellen", "lhm", "lhm.toml")
+lhm_model.write(ribasim_toml)
+cloud.joinpath("Rijkswaterstaat", "modellen", "lhm", "readme.md").write_text(readme)
+# %%
+cloud.upload_model("Rijkswaterstaat", model="lhm")
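
A note on the version selection above: `ModelVersion.version` renders as a "year.month.revision" string, and sorting such strings lexicographically puts "2024.10.x" before "2024.2.x", so the sort keys on the numeric (year, month, revision) tuple instead. A minimal, dependency-free sketch of the difference (version values invented for illustration):

# lexicographic string comparison misorders calendar versions,
# the numeric tuple does not; all version strings here are made up
versions = ["2024.2.0", "2024.10.0", "2023.12.3"]

latest_by_string = sorted(versions)[-1]  # "2024.2.0" -- wrong
latest_by_tuple = sorted(
    versions, key=lambda v: tuple(int(i) for i in v.split("."))
)[-1]  # "2024.10.0" -- right

print(latest_by_string, latest_by_tuple)
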
diff --git a/notebooks/samenvoegen_overig.py b/notebooks/samenvoegen_overig.py
new file mode 100644
index 0000000..40266e3
--- /dev/null
+++ b/notebooks/samenvoegen_overig.py
@@ -0,0 +1,162 @@
+# %%
+import geopandas as gpd
+import pandas as pd
+from ribasim_nl import CloudStorage
+from ribasim_nl.geometry import drop_z
+from shapely.geometry import MultiPolygon
+
+# %%
+cloud = CloudStorage()
+
+
+models = [
+ {
+ "authority": "Rijkswaterstaat",
+ "model": "hws",
+ "find_toml": False,
+ "update": False,
+ "zoom_level": 0,
+ "area_file": "krw_basins_vlakken.gpkg",
+ "area_layer": "krw_basins_vlakken",
+ },
+ {
+ "authority": "AmstelGooienVecht",
+ "model": "AmstelGooienVecht_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "AmstelGooienVecht_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "Delfland",
+ "model": "Delfland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "Delfland_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "HollandseDelta",
+ "model": "HollandseDelta_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "HollandseDelta_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "HollandsNoorderkwartier",
+ "model": "HollandsNoorderkwartier_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "HollandsNoorderkwartier_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "Rijnland",
+ "model": "Rijnland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "Rijnland_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "Rivierenland",
+ "model": "Rivierenland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "Rivierenland_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "Scheldestromen",
+ "model": "Scheldestromen_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "Scheldestromen_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "SchielandendeKrimpenerwaard",
+ "model": "SchielandendeKrimpenerwaard_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "SchielandendeKrimpenerwaard_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "WetterskipFryslan",
+ "model": "WetterskipFryslan_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "WetterskipFryslan_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+ {
+ "authority": "Zuiderzeeland",
+ "model": "Zuiderzeeland_poldermodel",
+ "find_toml": True,
+ "update": True,
+ "zoom_level": 3,
+ "area_file": "Zuiderzeeland_poldermodel_checks.gpkg",
+ "area_layer": "peilgebied_met_streefpeil",
+ },
+]
+
+gdfs = []
+for model in models:
+ print(model["authority"])
+ model_versions = [
+ i
+ for i in cloud.uploaded_models(model["authority"])
+ if i.model == model["model"]
+ ]
+    if model_versions:
+        # sort on the numeric tuple, not the "year.month.revision" string
+        model_version = sorted(
+            model_versions, key=lambda x: (x.year, x.month, x.revision)
+        )[-1]
+    else:
+        raise ValueError(f"No models with name {model['model']} in the cloud")
+
+ gpkg_file = cloud.joinpath(
+ model["authority"], "modellen", model_version.path_string, model["area_file"]
+ )
+
+ gdf = gpd.read_file(gpkg_file, layer=model["area_layer"], engine="pyogrio")
+ if gdf.crs is None:
+ gdf.crs = 28992
+ elif gdf.crs.to_epsg() != 28992:
+ gdf.to_crs(28992, inplace=True)
+ gdf.loc[:, ["waterbeheerder"]] = model["authority"]
+ gdf.rename(
+ columns={"waterhoogte": "level", "owmident": "code", "owmnaam": "name"},
+ inplace=True,
+ )
+ gdfs += [gdf]
+
+# %%
+gdf = pd.concat(gdfs, ignore_index=True)
+
+# drop z-coordinates
+gdf.loc[gdf.has_z, "geometry"] = gdf.loc[gdf.has_z, "geometry"].apply(drop_z)
+
+# keep only the polygonal parts of GeometryCollections
+mask = gdf.geom_type == "GeometryCollection"
+gdf.loc[mask, "geometry"] = gdf.loc[mask, "geometry"].apply(
+ lambda x: MultiPolygon(
+ [i for i in x.geoms if i.geom_type in ["Polygon", "MultiPolygon"]]
+ )
+)
+
+gpkg_file = cloud.joinpath("Rijkswaterstaat", "modellen", "lhm", "project_data.gpkg")
+gdf.to_file(gpkg_file, layer="area", engine="pyogrio")
+
+# %%
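
The GeometryCollection handling above keeps only the polygonal members. A small, self-contained shapely sketch of the same pattern (geometries invented for illustration):

from shapely.geometry import GeometryCollection, LineString, MultiPolygon, Polygon

# overlay operations can return a collection mixing polygons with slivers
# such as lines or points
collection = GeometryCollection(
    [Polygon([(0, 0), (1, 0), (1, 1)]), LineString([(0, 0), (2, 2)])]
)

# keep the polygonal members only, mirroring the filter above
polygons = MultiPolygon([g for g in collection.geoms if g.geom_type == "Polygon"])
print(polygons.wkt)  # MULTIPOLYGON (((0 0, 1 0, 1 1, 0 0)))
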
diff --git a/pixi.lock b/pixi.lock
index c978f48..8758d27 100644
--- a/pixi.lock
+++ b/pixi.lock
@@ -1,4 +1,3 @@
-version: 3
metadata:
content_hash:
linux-64: e90c2ee71ad70fc0a1c8302029533a7d1498f2bffcd0eaa8d2934700e775dc1d
@@ -276,6 +275,66 @@ package:
noarch: python
size: 100177
timestamp: 1700835583246
+- platform: linux-64
+ name: appdirs
+ version: 1.4.4
+ category: main
+ manager: conda
+ dependencies:
+ - python
+ url: https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2
+ hash:
+ md5: 5f095bc6454094e96f146491fd03633b
+ sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111
+ build: pyh9f0ad1d_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 12840
+ timestamp: 1603108499239
+- platform: osx-64
+ name: appdirs
+ version: 1.4.4
+ category: main
+ manager: conda
+ dependencies:
+ - python
+ url: https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2
+ hash:
+ md5: 5f095bc6454094e96f146491fd03633b
+ sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111
+ build: pyh9f0ad1d_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 12840
+ timestamp: 1603108499239
+- platform: win-64
+ name: appdirs
+ version: 1.4.4
+ category: main
+ manager: conda
+ dependencies:
+ - python
+ url: https://conda.anaconda.org/conda-forge/noarch/appdirs-1.4.4-pyh9f0ad1d_0.tar.bz2
+ hash:
+ md5: 5f095bc6454094e96f146491fd03633b
+ sha256: ae9fb8f68281f84482f2c234379aa12405a9e365151d43af20b3ae1f17312111
+ build: pyh9f0ad1d_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 12840
+ timestamp: 1603108499239
- platform: osx-64
name: appnope
version: 0.1.3
@@ -2485,6 +2544,66 @@ package:
noarch: python
size: 11065
timestamp: 1615209567874
+- platform: linux-64
+ name: cachetools
+ version: 5.3.2
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.7
+ url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.2-pyhd8ed1ab_0.conda
+ hash:
+ md5: 185cc1bf1d5af90020292888a3c7eb5d
+ sha256: cb8a6688d5650e4546a5f3c5b825bfe3c82594f1f588a93817f1bdb23e74baad
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 14739
+ timestamp: 1698197442391
+- platform: osx-64
+ name: cachetools
+ version: 5.3.2
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.7
+ url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.2-pyhd8ed1ab_0.conda
+ hash:
+ md5: 185cc1bf1d5af90020292888a3c7eb5d
+ sha256: cb8a6688d5650e4546a5f3c5b825bfe3c82594f1f588a93817f1bdb23e74baad
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 14739
+ timestamp: 1698197442391
+- platform: win-64
+ name: cachetools
+ version: 5.3.2
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.7
+ url: https://conda.anaconda.org/conda-forge/noarch/cachetools-5.3.2-pyhd8ed1ab_0.conda
+ hash:
+ md5: 185cc1bf1d5af90020292888a3c7eb5d
+ sha256: cb8a6688d5650e4546a5f3c5b825bfe3c82594f1f588a93817f1bdb23e74baad
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 14739
+ timestamp: 1698197442391
- platform: linux-64
name: cairo
version: 1.18.0
@@ -3953,6 +4072,66 @@ package:
license_family: MIT
size: 3430198
timestamp: 1693244283213
+- platform: linux-64
+ name: et_xmlfile
+ version: 1.1.0
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/et_xmlfile-1.1.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: a2f2138597905eaa72e561d8efb42cf3
+ sha256: 0c7bb50e1382615a660468dc531b8b17c5b91b88a02ed131c8e3cc63db507ce2
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 10602
+ timestamp: 1674664251571
+- platform: osx-64
+ name: et_xmlfile
+ version: 1.1.0
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/et_xmlfile-1.1.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: a2f2138597905eaa72e561d8efb42cf3
+ sha256: 0c7bb50e1382615a660468dc531b8b17c5b91b88a02ed131c8e3cc63db507ce2
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 10602
+ timestamp: 1674664251571
+- platform: win-64
+ name: et_xmlfile
+ version: 1.1.0
+ category: main
+ manager: conda
+ dependencies:
+ - python >=3.6
+ url: https://conda.anaconda.org/conda-forge/noarch/et_xmlfile-1.1.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: a2f2138597905eaa72e561d8efb42cf3
+ sha256: 0c7bb50e1382615a660468dc531b8b17c5b91b88a02ed131c8e3cc63db507ce2
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: MIT
+ license_family: MIT
+ noarch: python
+ size: 10602
+ timestamp: 1674664251571
- platform: linux-64
name: exceptiongroup
version: 1.2.0
@@ -5189,6 +5368,96 @@ package:
license: MIT
size: 1579654
timestamp: 1701621250532
+- platform: linux-64
+ name: geocube
+ version: 0.4.2
+ category: main
+ manager: conda
+ dependencies:
+ - appdirs
+ - click >=6.0
+ - geopandas >=0.7
+ - numpy >=1.20
+ - odc-geo
+ - pyproj >=2
+ - python >=3.9
+ - rasterio
+ - rioxarray >=0.4
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/geocube-0.4.2-pyhd8ed1ab_1.conda
+ hash:
+ md5: 0d021ab36a19d5c7b90b25945ed737f1
+ sha256: 6d59c88c1bd4897f4eaee53fb85c09c46bac8e975e8e8ea36b9bb3567744532e
+ build: pyhd8ed1ab_1
+ arch: x86_64
+ subdir: linux-64
+ build_number: 1
+ license: BSD-3-Clause
+ license_family: BSD
+ noarch: python
+ size: 22836
+ timestamp: 1684946573164
+- platform: osx-64
+ name: geocube
+ version: 0.4.2
+ category: main
+ manager: conda
+ dependencies:
+ - appdirs
+ - click >=6.0
+ - geopandas >=0.7
+ - numpy >=1.20
+ - odc-geo
+ - pyproj >=2
+ - python >=3.9
+ - rasterio
+ - rioxarray >=0.4
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/geocube-0.4.2-pyhd8ed1ab_1.conda
+ hash:
+ md5: 0d021ab36a19d5c7b90b25945ed737f1
+ sha256: 6d59c88c1bd4897f4eaee53fb85c09c46bac8e975e8e8ea36b9bb3567744532e
+ build: pyhd8ed1ab_1
+ arch: x86_64
+ subdir: osx-64
+ build_number: 1
+ license: BSD-3-Clause
+ license_family: BSD
+ noarch: python
+ size: 22836
+ timestamp: 1684946573164
+- platform: win-64
+ name: geocube
+ version: 0.4.2
+ category: main
+ manager: conda
+ dependencies:
+ - appdirs
+ - click >=6.0
+ - geopandas >=0.7
+ - numpy >=1.20
+ - odc-geo
+ - pyproj >=2
+ - python >=3.9
+ - rasterio
+ - rioxarray >=0.4
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/geocube-0.4.2-pyhd8ed1ab_1.conda
+ hash:
+ md5: 0d021ab36a19d5c7b90b25945ed737f1
+ sha256: 6d59c88c1bd4897f4eaee53fb85c09c46bac8e975e8e8ea36b9bb3567744532e
+ build: pyhd8ed1ab_1
+ arch: x86_64
+ subdir: win-64
+ build_number: 1
+ license: BSD-3-Clause
+ license_family: BSD
+ noarch: python
+ size: 22836
+ timestamp: 1684946573164
- platform: linux-64
name: geopandas
version: 0.14.1
@@ -14805,6 +15074,84 @@ package:
license_family: BSD
size: 6393785
timestamp: 1700875379700
+- platform: linux-64
+ name: odc-geo
+ version: 0.4.1
+ category: main
+ manager: conda
+ dependencies:
+ - affine
+ - cachetools
+ - numpy
+ - pyproj >=3.0.0
+ - python >=3.8
+ - shapely
+ - xarray >=0.19
+ url: https://conda.anaconda.org/conda-forge/noarch/odc-geo-0.4.1-pyhd8ed1ab_0.conda
+ hash:
+ md5: ab03d1e7d156b588fa5c0d232949b8a9
+ sha256: 036f1dde110f0de541e25b8cfbbbc3e91e7f4e8171b4374f90357e5eede8b3d6
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 106404
+ timestamp: 1687839858568
+- platform: osx-64
+ name: odc-geo
+ version: 0.4.1
+ category: main
+ manager: conda
+ dependencies:
+ - affine
+ - cachetools
+ - numpy
+ - pyproj >=3.0.0
+ - python >=3.8
+ - shapely
+ - xarray >=0.19
+ url: https://conda.anaconda.org/conda-forge/noarch/odc-geo-0.4.1-pyhd8ed1ab_0.conda
+ hash:
+ md5: ab03d1e7d156b588fa5c0d232949b8a9
+ sha256: 036f1dde110f0de541e25b8cfbbbc3e91e7f4e8171b4374f90357e5eede8b3d6
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 106404
+ timestamp: 1687839858568
+- platform: win-64
+ name: odc-geo
+ version: 0.4.1
+ category: main
+ manager: conda
+ dependencies:
+ - affine
+ - cachetools
+ - numpy
+ - pyproj >=3.0.0
+ - python >=3.8
+ - shapely
+ - xarray >=0.19
+ url: https://conda.anaconda.org/conda-forge/noarch/odc-geo-0.4.1-pyhd8ed1ab_0.conda
+ hash:
+ md5: ab03d1e7d156b588fa5c0d232949b8a9
+ sha256: 036f1dde110f0de541e25b8cfbbbc3e91e7f4e8171b4374f90357e5eede8b3d6
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 106404
+ timestamp: 1687839858568
- platform: linux-64
name: openjpeg
version: 2.5.0
@@ -14874,6 +15221,73 @@ package:
license_family: BSD
size: 236847
timestamp: 1694708878963
+- platform: linux-64
+ name: openpyxl
+ version: 3.1.2
+ category: main
+ manager: conda
+ dependencies:
+ - et_xmlfile
+ - libgcc-ng >=12
+ - python >=3.12.0rc3,<3.13.0a0
+ - python_abi 3.12.* *_cp312
+ url: https://conda.anaconda.org/conda-forge/linux-64/openpyxl-3.1.2-py312h98912ed_1.conda
+ hash:
+ md5: 7f4e257ea4714e9166a93b733b26c2da
+ sha256: e0c61ee0f467aa2ee93315f0612d86a1b62551572d4a38cb69fcde8b81d73d14
+ build: py312h98912ed_1
+ arch: x86_64
+ subdir: linux-64
+ build_number: 1
+ license: MIT
+ license_family: MIT
+ size: 694521
+ timestamp: 1695464937608
+- platform: osx-64
+ name: openpyxl
+ version: 3.1.2
+ category: main
+ manager: conda
+ dependencies:
+ - et_xmlfile
+ - python >=3.12.0rc3,<3.13.0a0
+ - python_abi 3.12.* *_cp312
+ url: https://conda.anaconda.org/conda-forge/osx-64/openpyxl-3.1.2-py312h104f124_1.conda
+ hash:
+ md5: 37d146e56f7725d3865b72fe2d389484
+ sha256: cd8bf57b42fe405f1849f840f3ef0d818e1c070466e26cdd9e569236bf30d65d
+ build: py312h104f124_1
+ arch: x86_64
+ subdir: osx-64
+ build_number: 1
+ license: MIT
+ license_family: MIT
+ size: 656343
+ timestamp: 1695465049261
+- platform: win-64
+ name: openpyxl
+ version: 3.1.2
+ category: main
+ manager: conda
+ dependencies:
+ - et_xmlfile
+ - python >=3.12.0rc3,<3.13.0a0
+ - python_abi 3.12.* *_cp312
+ - ucrt >=10.0.20348.0
+ - vc >=14.2,<15
+ - vc14_runtime >=14.29.30139
+ url: https://conda.anaconda.org/conda-forge/win-64/openpyxl-3.1.2-py312he70551f_1.conda
+ hash:
+ md5: 62d8cadbe4cad81bfe3088168c434b14
+ sha256: 4a766f6f23e76ad2fb76233d3f23b533309b99b9823127179ba98eaa8e60ae58
+ build: py312he70551f_1
+ arch: x86_64
+ subdir: win-64
+ build_number: 1
+ license: MIT
+ license_family: MIT
+ size: 629289
+ timestamp: 1695465331584
- platform: linux-64
name: openssl
version: 3.2.0
@@ -19451,7 +19865,7 @@ package:
timestamp: 1598024297745
- platform: linux-64
name: ribasim
- version: 0.6.1
+ version: 0.7.0
category: main
manager: conda
dependencies:
@@ -19467,21 +19881,22 @@ package:
- shapely >=2.0
- tomli
- tomli-w
- url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.6.1-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.7.0-pyhd8ed1ab_0.conda
hash:
- md5: c6f7cda724cd5b63a3293d80e8cfba6d
- sha256: e6dd3f375fc914dc6d657d46463fcbac3f01570d9f904cde3b8cfdcadd98efb6
+ md5: 852993160e0975307bbc7c935afe4e83
+ sha256: 7a8a4abb58d10c2d32d5ea961ebe4aa56f8a455aabfcd7d60b3146a53ecf0cea
build: pyhd8ed1ab_0
arch: x86_64
subdir: linux-64
build_number: 0
license: MIT
+ license_family: MIT
noarch: python
- size: 23898
- timestamp: 1702048238025
+ size: 23721
+ timestamp: 1707142009641
- platform: osx-64
name: ribasim
- version: 0.6.1
+ version: 0.7.0
category: main
manager: conda
dependencies:
@@ -19497,21 +19912,22 @@ package:
- shapely >=2.0
- tomli
- tomli-w
- url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.6.1-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.7.0-pyhd8ed1ab_0.conda
hash:
- md5: c6f7cda724cd5b63a3293d80e8cfba6d
- sha256: e6dd3f375fc914dc6d657d46463fcbac3f01570d9f904cde3b8cfdcadd98efb6
+ md5: 852993160e0975307bbc7c935afe4e83
+ sha256: 7a8a4abb58d10c2d32d5ea961ebe4aa56f8a455aabfcd7d60b3146a53ecf0cea
build: pyhd8ed1ab_0
arch: x86_64
subdir: osx-64
build_number: 0
license: MIT
+ license_family: MIT
noarch: python
- size: 23898
- timestamp: 1702048238025
+ size: 23721
+ timestamp: 1707142009641
- platform: win-64
name: ribasim
- version: 0.6.1
+ version: 0.7.0
category: main
manager: conda
dependencies:
@@ -19527,18 +19943,97 @@ package:
- shapely >=2.0
- tomli
- tomli-w
- url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.6.1-pyhd8ed1ab_0.conda
+ url: https://conda.anaconda.org/conda-forge/noarch/ribasim-0.7.0-pyhd8ed1ab_0.conda
hash:
- md5: c6f7cda724cd5b63a3293d80e8cfba6d
- sha256: e6dd3f375fc914dc6d657d46463fcbac3f01570d9f904cde3b8cfdcadd98efb6
+ md5: 852993160e0975307bbc7c935afe4e83
+ sha256: 7a8a4abb58d10c2d32d5ea961ebe4aa56f8a455aabfcd7d60b3146a53ecf0cea
build: pyhd8ed1ab_0
arch: x86_64
subdir: win-64
build_number: 0
license: MIT
+ license_family: MIT
noarch: python
- size: 23898
- timestamp: 1702048238025
+ size: 23721
+ timestamp: 1707142009641
+- platform: linux-64
+ name: rioxarray
+ version: 0.15.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.21
+ - packaging
+ - pyproj >=2.2
+ - python >=3.9
+ - rasterio >=1.2
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/rioxarray-0.15.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: d0ce69099167a03cf0a1241f40284307
+ sha256: 6230b475046bd74c7b12c0c9121c57a8e18337b40265813ba9bef0866ec20866
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: Apache
+ noarch: python
+ size: 47286
+ timestamp: 1692047062481
+- platform: osx-64
+ name: rioxarray
+ version: 0.15.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.21
+ - packaging
+ - pyproj >=2.2
+ - python >=3.9
+ - rasterio >=1.2
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/rioxarray-0.15.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: d0ce69099167a03cf0a1241f40284307
+ sha256: 6230b475046bd74c7b12c0c9121c57a8e18337b40265813ba9bef0866ec20866
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: Apache
+ noarch: python
+ size: 47286
+ timestamp: 1692047062481
+- platform: win-64
+ name: rioxarray
+ version: 0.15.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.21
+ - packaging
+ - pyproj >=2.2
+ - python >=3.9
+ - rasterio >=1.2
+ - scipy
+ - xarray >=0.17
+ url: https://conda.anaconda.org/conda-forge/noarch/rioxarray-0.15.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: d0ce69099167a03cf0a1241f40284307
+ sha256: 6230b475046bd74c7b12c0c9121c57a8e18337b40265813ba9bef0866ec20866
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ license: Apache-2.0
+ license_family: Apache
+ noarch: python
+ size: 47286
+ timestamp: 1692047062481
- platform: linux-64
name: rpds-py
version: 0.13.2
@@ -22883,6 +23378,138 @@ package:
license_family: BSD
size: 61358
timestamp: 1699533495284
+- platform: linux-64
+ name: xarray
+ version: 2023.11.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.22
+ - packaging >=21.3
+ - pandas >=1.4
+ - python >=3.9
+ url: https://conda.anaconda.org/conda-forge/noarch/xarray-2023.11.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: f445b20bac3db8f604a48592087b2d8f
+ sha256: 71a2000fd5f5065e8c9a184c3f9262b27c4a5eeb5366a6d7e4d267d28e9f07d9
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: linux-64
+ build_number: 0
+ constrains:
+ - zarr >=2.12
+ - sparse >=0.13
+ - netcdf4 >=1.6.0
+ - cartopy >=0.20
+ - toolz >=0.12
+ - h5netcdf >=1.0
+ - distributed >=2022.7
+ - scipy >=1.8
+ - matplotlib-base >=3.5
+ - seaborn >=0.11
+ - hdf5 >=1.12
+ - numba >=0.55
+ - h5py >=3.6
+ - flox >=0.5
+ - cftime >=1.6
+ - pint >=0.19
+ - iris >=3.2
+ - dask-core >=2022.7
+ - nc-time-axis >=1.4
+ - bottleneck >=1.3
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 713393
+ timestamp: 1700303846370
+- platform: osx-64
+ name: xarray
+ version: 2023.11.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.22
+ - packaging >=21.3
+ - pandas >=1.4
+ - python >=3.9
+ url: https://conda.anaconda.org/conda-forge/noarch/xarray-2023.11.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: f445b20bac3db8f604a48592087b2d8f
+ sha256: 71a2000fd5f5065e8c9a184c3f9262b27c4a5eeb5366a6d7e4d267d28e9f07d9
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: osx-64
+ build_number: 0
+ constrains:
+ - zarr >=2.12
+ - sparse >=0.13
+ - netcdf4 >=1.6.0
+ - cartopy >=0.20
+ - toolz >=0.12
+ - h5netcdf >=1.0
+ - distributed >=2022.7
+ - scipy >=1.8
+ - matplotlib-base >=3.5
+ - seaborn >=0.11
+ - hdf5 >=1.12
+ - numba >=0.55
+ - h5py >=3.6
+ - flox >=0.5
+ - cftime >=1.6
+ - pint >=0.19
+ - iris >=3.2
+ - dask-core >=2022.7
+ - nc-time-axis >=1.4
+ - bottleneck >=1.3
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 713393
+ timestamp: 1700303846370
+- platform: win-64
+ name: xarray
+ version: 2023.11.0
+ category: main
+ manager: conda
+ dependencies:
+ - numpy >=1.22
+ - packaging >=21.3
+ - pandas >=1.4
+ - python >=3.9
+ url: https://conda.anaconda.org/conda-forge/noarch/xarray-2023.11.0-pyhd8ed1ab_0.conda
+ hash:
+ md5: f445b20bac3db8f604a48592087b2d8f
+ sha256: 71a2000fd5f5065e8c9a184c3f9262b27c4a5eeb5366a6d7e4d267d28e9f07d9
+ build: pyhd8ed1ab_0
+ arch: x86_64
+ subdir: win-64
+ build_number: 0
+ constrains:
+ - zarr >=2.12
+ - sparse >=0.13
+ - netcdf4 >=1.6.0
+ - cartopy >=0.20
+ - toolz >=0.12
+ - h5netcdf >=1.0
+ - distributed >=2022.7
+ - scipy >=1.8
+ - matplotlib-base >=3.5
+ - seaborn >=0.11
+ - hdf5 >=1.12
+ - numba >=0.55
+ - h5py >=3.6
+ - flox >=0.5
+ - cftime >=1.6
+ - pint >=0.19
+ - iris >=3.2
+ - dask-core >=2022.7
+ - nc-time-axis >=1.4
+ - bottleneck >=1.3
+ license: Apache-2.0
+ license_family: APACHE
+ noarch: python
+ size: 713393
+ timestamp: 1700303846370
- platform: linux-64
name: xcb-util
version: 0.4.0
@@ -23802,3 +24429,4 @@ package:
license_family: BSD
size: 343428
timestamp: 1693151615801
+version: 1
diff --git a/pixi.toml b/pixi.toml
index 0c91a2f..a45ffaa 100644
--- a/pixi.toml
+++ b/pixi.toml
@@ -51,11 +51,13 @@ tests = { depends_on = [
[dependencies]
fiona = "*"
+geocube = "*"
geopandas = "*"
ipykernel = "*"
jupyterlab = "*"
matplotlib = "*"
mypy = "*"
+openpyxl = "*"
pandas = "*"
pip = "*"
pre-commit = "*"
@@ -70,7 +72,7 @@ quarto = "*"
quartodoc = "*"
rasterstats = "*"
requests = "*"
-ribasim = ">=0.6.1,<0.7"
+ribasim = "==0.7.0"
ruff = "*"
shapely = ">=2"
tomli = "*"
diff --git a/ruff.toml b/ruff.toml
index 2d1f0de..6c6cc6a 100644
--- a/ruff.toml
+++ b/ruff.toml
@@ -1,6 +1,6 @@
# See https://docs.astral.sh/ruff/rules/
select = ["D", "E", "F", "NPY", "PD", "C4", "I", "UP"]
-ignore = ["D1", "D202", "D205", "D400", "D404", "E501", "PD002", "PD901"]
+ignore = ["D1", "D202", "D205", "D400", "D404", "E501", "PD002", "PD008", "PD901"]
fixable = ["I"]
extend-include = ["*.ipynb"]
exclude = ["scripts/*"]
diff --git a/src/ribasim_nl/ribasim_nl/__init__.py b/src/ribasim_nl/ribasim_nl/__init__.py
index 6fa0cce..5c0c49f 100644
--- a/src/ribasim_nl/ribasim_nl/__init__.py
+++ b/src/ribasim_nl/ribasim_nl/__init__.py
@@ -2,5 +2,6 @@
from ribasim_nl.cloud import CloudStorage
from ribasim_nl.network import Network
+from ribasim_nl.reset_index import reset_index
-__all__ = ["CloudStorage", "Network"]
+__all__ = ["CloudStorage", "Network", "reset_index"]
diff --git a/src/ribasim_nl/ribasim_nl/case_conversions.py b/src/ribasim_nl/ribasim_nl/case_conversions.py
new file mode 100644
index 0000000..5a7bee1
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/case_conversions.py
@@ -0,0 +1,13 @@
+def snake_to_pascal_case(snake_case: str) -> str:
+ """Convert snake_case to PascalCase"""
+
+ words = snake_case.split("_")
+ return "".join(i.title() for i in words)
+
+
+def pascal_to_snake_case(pascal_case: str) -> str:
+ """Convert PascalCase to snake_case"""
+
+ return "".join(["_" + i.lower() if i.isupper() else i for i in pascal_case]).lstrip(
+ "_"
+ )
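
Both helpers round-trip for plain snake_case/PascalCase names, for example:

from ribasim_nl.case_conversions import pascal_to_snake_case, snake_to_pascal_case

assert snake_to_pascal_case("tabulated_rating_curve") == "TabulatedRatingCurve"
assert pascal_to_snake_case("TabulatedRatingCurve") == "tabulated_rating_curve"

# the round-trip only holds for simple names: runs of capitals are split
# letter by letter, so pascal_to_snake_case("KRWBasin") gives "k_r_w_basin"
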
diff --git a/src/ribasim_nl/ribasim_nl/cloud.py b/src/ribasim_nl/ribasim_nl/cloud.py
index ed25649..34b156c 100644
--- a/src/ribasim_nl/ribasim_nl/cloud.py
+++ b/src/ribasim_nl/ribasim_nl/cloud.py
@@ -1,4 +1,3 @@
-# %%
import logging
import os
import re
@@ -58,6 +57,14 @@ class ModelVersion:
month: int
revision: int
+ @property
+ def version(self):
+ return f"{self.year}.{self.month}.{self.revision}"
+
+ @property
+ def path_string(self):
+ return f"{self.model}_{self.year}_{self.month}_{self.revision}"
+
@dataclass
class CloudStorage:
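
For illustration, the two new properties compose like this (field values invented; the dataclass may define more fields than this hunk shows):

from ribasim_nl.cloud import ModelVersion

v = ModelVersion(model="hws", year=2024, month=1, revision=0)  # hypothetical instance
assert v.version == "2024.1.0"
assert v.path_string == "hws_2024_1_0"
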
diff --git a/src/ribasim_nl/ribasim_nl/concat.py b/src/ribasim_nl/ribasim_nl/concat.py
new file mode 100644
index 0000000..5e214c2
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/concat.py
@@ -0,0 +1,77 @@
+import pandas as pd
+import ribasim
+from ribasim import Model
+
+from ribasim_nl import reset_index
+from ribasim_nl.case_conversions import pascal_to_snake_case
+from ribasim_nl.model import CLASS_TABLES, get_table
+
+
+def concat(models: list[Model]) -> Model:
+    """Concatenate existing models into one Ribasim model
+
+    Parameters
+    ----------
+    models : list[Model]
+        List of ribasim.Model instances
+
+    Returns
+    -------
+    Model
+        concatenated ribasim.Model
+    """
+
+    # models are concatenated into the first model
+ model = reset_index(models[0])
+ # determine node_start of next model
+ node_start = model.network.node.df.index.max() + 1
+ # concat all other models into model
+
+ for merge_model in models[1:]:
+ # reset index
+ merge_model = reset_index(merge_model, node_start)
+
+        # determine node_start of next model; after reset_index the highest
+        # node id lives in merge_model, not yet in model
+        node_start = merge_model.network.node.df.index.max() + 1
+
+ # merge network
+ model.network.node = ribasim.Node(
+ df=pd.concat([model.network.node.df, merge_model.network.node.df])
+ )
+ model.network.edge = ribasim.Edge(
+ df=pd.concat(
+ [model.network.edge.df, merge_model.network.edge.df], ignore_index=True
+ ).reset_index()
+ )
+
+ # merge all non-spatial tables
+ for class_name, attrs in CLASS_TABLES.items():
+ # convert class-name to model attribute
+ class_attr = pascal_to_snake_case(class_name)
+
+ # read all tables and temporary store them in a dict
+ tables = {}
+ for attr in attrs:
+ # table string
+ table = f"{class_attr}.{attr}.df"
+
+ # see if there is a table to concatenate
+ merge_model_df = get_table(merge_model, table)
+ model_df = get_table(model, table)
+
+ if merge_model_df is not None:
+ if model_df is not None:
+ # make sure we concat both df's into the correct ribasim-object
+ df = pd.concat([model_df, merge_model_df], ignore_index=True)
+ else:
+ df = merge_model_df
+ tables[attr] = df
+ elif model_df is not None:
+ tables[attr] = model_df
+
+ # now we gently update the Ribasim class with new tables
+ if tables:
+ table_class = getattr(ribasim, class_name)
+ setattr(model, class_attr, table_class(**tables))
+
+ return model
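
A sketch of how `concat` is meant to be used (toml paths are placeholders): node indices of each subsequent model are shifted via `reset_index` so ids stay unique in the merged network.

import ribasim

from ribasim_nl.concat import concat

# placeholder paths; any two written Ribasim models would do
model_a = ribasim.Model(filepath="model_a/model_a.toml")
model_b = ribasim.Model(filepath="model_b/model_b.toml")

lhm_model = concat([model_a, model_b])
lhm_model.write("lhm/lhm.toml")
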
diff --git a/src/ribasim_nl/ribasim_nl/data/styles/links.qml b/src/ribasim_nl/ribasim_nl/data/styles/links.qml
new file mode 100644
index 0000000..a893050
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/data/styles/links.qml
@@ -0,0 +1,620 @@
[... 620 lines of QGIS QML styling for the links layer; the XML markup was
lost in extraction. Little survives beyond some flag values and the
"generatedlayout" / "fid" fields. ...]
diff --git a/src/ribasim_nl/ribasim_nl/data/styles/links.sld b/src/ribasim_nl/ribasim_nl/data/styles/links.sld
new file mode 100644
index 0000000..8c554c9
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/data/styles/links.sld
@@ -0,0 +1,34 @@
[... 34 lines of SLD styling for the links layer; XML markup lost in
extraction. Recoverable details: a "Single symbol" rule and a text
symbolizer labelling links by "name" (Arial 13, colour #323232). ...]
diff --git a/src/ribasim_nl/ribasim_nl/data/styles/nodes.qml b/src/ribasim_nl/ribasim_nl/data/styles/nodes.qml
new file mode 100644
index 0000000..b994809
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/data/styles/nodes.qml
@@ -0,0 +1,632 @@
[... 632 lines of QGIS QML styling for the nodes layer; the XML markup was
lost in extraction. Little survives beyond some flag values and the
"generatedlayout" / "fid" fields. ...]
diff --git a/src/ribasim_nl/ribasim_nl/data/styles/nodes.sld b/src/ribasim_nl/ribasim_nl/data/styles/nodes.sld
new file mode 100644
index 0000000..1281e9c
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/data/styles/nodes.sld
@@ -0,0 +1,115 @@
[... 115 lines of SLD styling for the nodes layer; XML markup lost in
extraction. Recoverable details: rules filtering on type = connection
(#4c25da), downstream_boundary (#f31322) and upstream_boundary (#33a02c),
each drawn as a 7 px circle with a #232323 stroke, plus a text symbolizer
labelling nodes by "node_id" (Arial 13, colour #323232). ...]
diff --git a/src/ribasim_nl/ribasim_nl/discrete_control.py b/src/ribasim_nl/ribasim_nl/discrete_control.py
new file mode 100644
index 0000000..b72d407
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/discrete_control.py
@@ -0,0 +1,46 @@
+from pandas import DataFrame
+
+
+def control_state(name: str, length: int) -> list[str]:
+ return [f"{name}_{i+1}" for i in range(length)]
+
+
+def condition(
+ values: list[float],
+ node_id: int,
+ listen_feature_id: int,
+ variable: str = "flow_rate",
+ name: str | None = None,
+) -> DataFrame:
+ df = DataFrame({"greater_than": values})
+ df.loc[:, ["node_id"]] = node_id
+ df.loc[:, ["listen_feature_id"]] = listen_feature_id
+ df.loc[:, ["variable"]] = variable
+ df.loc[:, ["remarks"]] = control_state(name, len(df))
+ return df
+
+
+def logic(
+ node_id: int,
+ length: int,
+ name: str | None = None,
+) -> DataFrame:
+ df = DataFrame(
+ {
+ "truth_state": [
+ "".join(["T"] * i + ["F"] * length)[0:length] for i in range(length)
+ ]
+ }
+ )
+ df.loc[:, ["node_id"]] = node_id
+ df.loc[:, ["control_state"]] = control_state(name, len(df))
+ return df
+
+
+def node_table(values: list[float], variable: str, name: str, **kwargs) -> DataFrame:
+ df = DataFrame({variable: values})
+ df.loc[:, ["control_state"]] = control_state(name, len(df))
+ for k, v in kwargs.items():
+ df.loc[:, [k]] = v
+
+ return df
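
A worked example of how the three builders fit together, assuming a hypothetical pump at node 12 listening to the flow at feature 8 with two thresholds:

from ribasim_nl.discrete_control import condition, logic, node_table

condition_df = condition([0.5, 1.0], node_id=12, listen_feature_id=8, name="pump")
# greater_than 0.5 -> remarks "pump_1", 1.0 -> "pump_2"

logic_df = logic(node_id=12, length=2, name="pump")
# truth_state "FF" -> control_state "pump_1", "TF" -> "pump_2"

pump_df = node_table([0.0, 2.5], variable="flow_rate", name="pump", node_id=12)
# flow_rate 0.0 for control_state "pump_1", 2.5 for "pump_2"
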
diff --git a/src/ribasim_nl/ribasim_nl/geodataframe.py b/src/ribasim_nl/ribasim_nl/geodataframe.py
index d743399..4d6d32f 100644
--- a/src/ribasim_nl/ribasim_nl/geodataframe.py
+++ b/src/ribasim_nl/ribasim_nl/geodataframe.py
@@ -4,8 +4,11 @@
import geopandas as gpd
import pandas as pd
from geopandas import GeoDataFrame
+from shapely.geometry import MultiPolygon, Polygon
+from shapely.ops import polylabel
-from ribasim_nl.geometry import split_basin
+from ribasim_nl import Network
+from ribasim_nl.geometry import sort_basins, split_basin
def join_by_poly_overlay(
@@ -90,7 +93,7 @@ def split_basins(basins_gdf: GeoDataFrame, lines_gdf: GeoDataFrame) -> GeoDataFr
## filter polygons with two intersection-points only
poly_select_gdf = poly_select_gdf[
poly_select_gdf.geometry.boundary.intersection(line.geometry).apply(
- lambda x: False if x.geom_type == "Point" else len(x.geoms) == 2
+                lambda x: x.geom_type != "Point"
)
]
@@ -100,7 +103,7 @@ def split_basins(basins_gdf: GeoDataFrame, lines_gdf: GeoDataFrame) -> GeoDataFr
f"no intersect for {line}. Please make sure it is extended outside the basin on two sides"
)
else:
- ## we create 2 new fatures in data
+ ## we create new features
data = []
for basin in poly_select_gdf.itertuples():
kwargs = basin._asdict()
@@ -241,3 +244,94 @@ def find_ident(point):
)
return poly_directions_gdf
+
+
+def basins_to_points(
+ basins_gdf: GeoDataFrame,
+ network: Network,
+ mask: Polygon | None = None,
+ buffer: int | None = None,
+) -> GeoDataFrame:
+    """Get a point within each basin, snapped to the network where possible
+
+ Parameters
+ ----------
+ basins_gdf : GeoDataFrame
+ GeoDataFrame with Polygon basins
+ network : Network
+ Ribasim-NL network to snap points on
+ mask : Polygon, optional
+ Optional mask to clip basin, by default None
+ buffer : int, optional
+        Buffer to apply to the basin in case no links are found, by default None
+
+ Returns
+ -------
+ GeoDataFrame
+ Points within basin on network
+ """
+ data = []
+ if network is not None:
+ links_gdf = network.links
+
+ def select_links(geometry):
+ idx = links_gdf.sindex.intersection(geometry.bounds)
+ links_select_gdf = links_gdf.iloc[idx]
+ links_select_gdf = links_select_gdf[links_select_gdf.within(geometry)]
+ return links_select_gdf
+
+ for row in basins_gdf.itertuples():
+ # get basin_polygon and centroid
+ basin_polygon = row.geometry
+ point = basin_polygon.centroid
+ node_id = None
+
+ # get links within basin_polygon
+ if network is not None:
+            # we prefer to find links within the mask, if one is given
+            if mask is not None:
+                masked_basin_polygon = basin_polygon.intersection(mask)
+                links_select_gdf = select_links(masked_basin_polygon)
+            else:
+                links_select_gdf = select_links(basin_polygon)
+
+            # if the masked selection is empty we fall back to the full polygon
+            if links_select_gdf.empty:
+                links_select_gdf = select_links(basin_polygon)
+
+ # if still not we try to find links within polygon applying a buffer
+ if links_select_gdf.empty and (buffer is not None):
+ links_select_gdf = select_links(basin_polygon.buffer(buffer))
+
+ # if we selected links, we snap to closest node
+ if not links_select_gdf.empty:
+                # take the longest selected link
+                link = links_select_gdf.loc[links_select_gdf.geometry.length.idxmax()]
+
+ # get distance to upstream and downstream point in the link
+ us_point, ds_point = link.geometry.boundary.geoms
+ us_dist, ds_dist = (i.distance(point) for i in [us_point, ds_point])
+
+ # choose closest point as basin point
+ if us_dist < ds_dist:
+ node_id = getattr(link, "node_from")
+ point = us_point
+ else:
+ node_id = getattr(link, "node_to")
+ point = ds_point
+
+ # if we don't snap on network, we make sure point is within polygon
+ elif not point.within(basin_polygon):
+ # polylabel only works on polygons; we use largest polygon if input is MultiPolygon
+ if isinstance(basin_polygon, MultiPolygon):
+ basin_polygon = sort_basins(list(basin_polygon.geoms))[-1]
+ point = polylabel(basin_polygon)
+
+ attributes = {i: getattr(row, i) for i in basins_gdf.columns}
+ attributes["geometry"] = point
+ attributes["node_id"] = node_id
+ data += [attributes]
+
+ return gpd.GeoDataFrame(data, crs=basins_gdf.crs)
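
A sketch of calling `basins_to_points` (file names are placeholders): each basin yields one point, snapped to the nearest end of the longest network link inside it, falling back to the centroid or polylabel when no link is found.

import geopandas as gpd

from ribasim_nl import Network
from ribasim_nl.geodataframe import basins_to_points

basins_gdf = gpd.read_file("basins.gpkg")  # placeholder file
network = Network.from_network_gpkg("network.gpkg")  # placeholder file

points_gdf = basins_to_points(basins_gdf, network, buffer=100)
# rows without a matched link keep node_id None
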
diff --git a/src/ribasim_nl/ribasim_nl/model.py b/src/ribasim_nl/ribasim_nl/model.py
new file mode 100644
index 0000000..269174a
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/model.py
@@ -0,0 +1,142 @@
+import geopandas as gpd
+import pandas as pd
+import ribasim
+from ribasim import Model, Network
+from shapely.geometry import LineString, Point
+
+from ribasim_nl.case_conversions import pascal_to_snake_case
+
+# TODO: get this from ribasim somehow
+CLASS_TABLES = {
+ "Basin": ["static", "profile", "state"],
+ "LinearResistance": ["static"],
+ "ManningResistance": ["static"],
+ "Pump": ["static"],
+ "Outlet": ["static"],
+ "Terminal": ["static"],
+ "FlowBoundary": ["static"],
+ "LevelBoundary": ["static", "time"],
+ "FractionalFlow": ["static"],
+ "TabulatedRatingCurve": ["static"],
+}
+
+TABLES = [
+ j
+ for r in [
+ [f"{pascal_to_snake_case(k)}.{i}.df" for i in v]
+ for k, v in CLASS_TABLES.items()
+ ]
+ for j in r
+]
+
+
+def get_table(model: Model, table: str = "basin.static.df"):
+ if table not in TABLES:
+ raise ValueError(f"the value of table should be in {TABLES} not {table}")
+
+ else:
+ attributes = table.split(".")
+ value = model
+ for attr in attributes:
+ if hasattr(value, attr):
+ value = getattr(value, attr)
+ else:
+ return None
+ return value
+
+
+def add_control_node_to_network(
+ network: Network,
+ node_ids: list[int],
+ offset=100,
+ offset_node_id: int | None = None,
+ ctrl_node_geom: tuple | None = None,
+ **kwargs,
+) -> int:
+ """Add a control node and control edge to a ribasim.Network
+
+ Parameters
+ ----------
+ network : Network
+ Ribasim.Network with Node and Edge tables
+ node_ids : list[int]
+ Nodes to connect the control node
+    offset : int, optional
+        left-side offset of the control node from node_id, used when len(node_ids) == 1
+        or offset_node_id is specified; otherwise ignored. By default 100
+    offset_node_id : int | None, optional
+        explicit node_id to take the left-offset of the control-node from. By default None
+    ctrl_node_geom : tuple | None, optional
+        explicit (x, y) tuple for the control-node location. By default None
+
+
+ Extra kwargs will be added as attributes to the control-node
+
+    Returns
+    -------
+    int
+        node_id of the control-node; the network itself is updated in place
+ """
+
+    # see if we have a single node-id to offset the ctrl-node from
+    if offset_node_id is not None:
+        node_id = offset_node_id
+    elif len(node_ids) == 1:
+        node_id = node_ids[0]
+    else:
+        node_id = None
+
+ if ctrl_node_geom is None:
+ if node_id is not None: # if node-id we take the left-offset
+ linestring = (
+ network.edge.df[network.edge.df["to_node_id"] == node_id]
+ .iloc[0]
+ .geometry
+ )
+ ctrl_node_geom = Point(
+ linestring.parallel_offset(offset, "left").coords[-1]
+ )
+ else: # if not we take the centroid of all node_ids
+ ctrl_node_geom = network.node.df[
+ network.node.df.index.isin(node_ids)
+ ].unary_union.centroid
+ else:
+ ctrl_node_geom = Point(ctrl_node_geom)
+
+    # add the ctrl-node to the network
+ ctrl_node_id = network.node.df.index.max() + 1
+ ctrl_node = {
+ "type": "DiscreteControl",
+ "meta_node_id": ctrl_node_id,
+ "geometry": ctrl_node_geom,
+ **kwargs,
+ }
+
+ network.node.df.loc[ctrl_node_id] = ctrl_node
+ network.node.df.crs = 28992
+
+ # add edge(s) to the network
+ ctrl_edge_gdf = gpd.GeoDataFrame(
+ [
+ {
+ "from_node_id": ctrl_node_id,
+ "to_node_id": i,
+ "edge_type": "control",
+ "geometry": LineString(
+ (ctrl_node_geom, network.node.df.at[i, "geometry"])
+ ),
+ }
+ for i in node_ids
+ ]
+ )
+
+ network.edge = ribasim.Edge(
+ df=pd.concat([network.edge.df, ctrl_edge_gdf], ignore_index=True)
+ )
+ return ctrl_node_id
+
+
+def update_table(table, new_table):
+ node_ids = new_table.node_id.unique()
+ table = table[~table.node_id.isin(node_ids)]
+ table = pd.concat([table, new_table])
+ table.reset_index(inplace=True)
+ return table
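
A sketch of wiring a control node into an existing network with `add_control_node_to_network` (toml path, node id and the extra attribute are invented; extra kwargs become attributes of the new node):

import ribasim

from ribasim_nl.model import add_control_node_to_network

model = ribasim.Model(filepath="some_model/some_model.toml")  # placeholder

# add a DiscreteControl node 100 m left of the edge flowing into node 12
ctrl_node_id = add_control_node_to_network(
    model.network, node_ids=[12], offset=100, meta_waterbeheerder="Rijkswaterstaat"
)
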
diff --git a/src/ribasim_nl/ribasim_nl/network.py b/src/ribasim_nl/ribasim_nl/network.py
index ce92b18..9230001 100644
--- a/src/ribasim_nl/ribasim_nl/network.py
+++ b/src/ribasim_nl/ribasim_nl/network.py
@@ -1,11 +1,31 @@
-from dataclasses import dataclass
+import logging
+from collections import Counter
+from dataclasses import dataclass, field
+from itertools import chain, product
from pathlib import Path
+import geopandas as gpd
+import networkx as nx
+import pandas as pd
from geopandas import GeoDataFrame, GeoSeries
-from networkx import Graph
-from shapely.geometry import LineString, box
+from networkx import DiGraph, Graph, NetworkXNoPath, shortest_path, traversal
+from shapely.geometry import LineString, Point, box
from shapely.ops import snap, split
+from ribasim_nl.styles import add_styles_to_geopackage
+
+logger = logging.getLogger(__name__)
+
+GEOMETRIES_ALLOWED = ["LineString", "MultiLineString"]
+
+
+def stop_iter(first_value, second_value):
+ # stop iteration if neither value is None and values are unequal
+ if all((pd.notna(first_value), pd.notna(second_value))):
+ return first_value != second_value
+ else:
+ return False
+
@dataclass
class Network:
@@ -20,10 +40,19 @@ class Network:
Attributes
----------
lines_gdf : GeoDataFrame
- GeoDataFrame with lines
+ GeoDataFrame with LineStrings
+ tolerance : float | None
+ tolerance for snapping nodes and filling gaps. Defaults to None
+ name_col: str
+ column in lines_gdf to preserve as 'name' column in network-links. Defaults to 'name'
+ id_col: str
+ column in lines_gdf to preserve as 'id' column in network-links. Defaults to 'id'
Methods
-------
+ from_lines_gpkg(gpkg_file, layer=None, **kwargs)
+ returns a Network from a layer with LineStrings within a GeoPackage.
+ Optionally the user specifies a layer in a multilayer-GeoPackage.
nodes
returns GeoDataFrame with the nodes in the network
links
@@ -35,39 +64,65 @@ class Network:
"""
lines_gdf: GeoDataFrame
+ name_col: str | None = None
+ id_col: str | None = None
tolerance: float | None = None
- _graph: Graph | None = None
- _nodes_gdf: GeoDataFrame | None = None
- _links_gdf: GeoDataFrame | None = None
+ _graph: DiGraph | None = field(default=None, repr=False)
+ _graph_undirected: Graph | None = field(default=None, repr=False)
- @property
- def nodes(self) -> GeoDataFrame:
- if self._nodes_gdf is None:
- geoseries = self.lines_gdf.boundary.explode(index_parts=True).unique()
-
- # snap nodes within tolerance if it's set. We:
- # 1. create polygons using buffer
- # 2. dissolving to a multipolygon using unary_union
- # 3. explode to individual polygons
- # 4. convert to points taking the centroid
- if self.tolerance is not None:
- geoseries = (
- GeoSeries([geoseries.buffer(self.tolerance / 2).unary_union()])
- .explode(index_parts=False)
- .reset_index(drop=True)
- .centroid
+ def __post_init__(self):
+ self.validate_inputs()
+
+ def validate_inputs(self):
+ """Validate if inputs are good-to-go for generating a graph"""
+        # check if name_col and id_col are valid values; reassigning the loop
+        # variable would not update the attributes, so check them directly
+        if (self.name_col is not None) and (self.name_col not in self.lines_gdf.columns):
+            logger.warning(
+                f"{self.name_col} not a column in lines_gdf, input will be set to None"
+            )
+            self.name_col = None
+        if (self.id_col is not None) and (self.id_col not in self.lines_gdf.columns):
+            logger.warning(
+                f"{self.id_col} not a column in lines_gdf, input will be set to None"
+            )
+            self.id_col = None
+
+ # check if lines_gdf only contains allowed geometries
+ geom_types = self.lines_gdf.geom_type.unique()
+ if not all(i in GEOMETRIES_ALLOWED for i in geom_types):
+ raise ValueError(
+ f"Only geom_types {GEOMETRIES_ALLOWED} are allowed. Got {geom_types}"
+ )
+
+ # explode to LineString
+ elif "MultiLineString" in geom_types:
+ self.lines_gdf = self.lines_gdf.explode(index_parts=False)
- # make it a GeoDataFrame
- self._nodes_gdf = GeoDataFrame(geometry=geoseries, crs=self.lines_gdf.crs)
- # let's start index at 1 and name it node_id
- self._nodes_gdf.index += 1
- self._nodes_gdf.index.name = "node_id"
- return self._nodes_gdf
+ @classmethod
+ def from_lines_gpkg(cls, gpkg_file: str | Path, layer: str | None = None, **kwargs):
+ """Instantiate class from a lines_gpkg"""
+ lines_gdf = gpd.read_file(gpkg_file, layer=layer)
+ return cls(lines_gdf, **kwargs)
+
+ @classmethod
+ def from_network_gpkg(cls, gpkg_file: str | Path, **kwargs):
+ """Instantiate class from a network gpkg"""
+ nodes_gdf = gpd.read_file(gpkg_file, layer="nodes", engine="pyogrio").set_index(
+ "node_id"
+ )
+ links_gdf = gpd.read_file(gpkg_file, layer="links", engine="pyogrio").set_index(
+ ["node_from", "node_to"]
+ )
+ graph = DiGraph()
+ graph.add_nodes_from(nodes_gdf.to_dict(orient="index").items())
+ graph.add_edges_from(
+ [(k[0], k[1], v) for k, v in links_gdf.to_dict(orient="index").items()]
+ )
+
+ result = cls(links_gdf, **kwargs)
+ result.set_graph(graph)
+ return result
@property
def snap_tolerance(self):
+ """Snap tolerance for shapely.ops.snap"""
if self.tolerance:
return self.tolerance
else:
@@ -75,54 +130,65 @@ def snap_tolerance(self):
@property
def links(self) -> GeoDataFrame:
- if self._links_gdf is None:
- _ = self.graph
- return self._links_gdf
+ """Return graph links as GeoDataFrame"""
+ # make sure we have a graph
+ _ = self.graph
+ return GeoDataFrame(
+ [
+ {"node_from": i[0], "node_to": i[1], **i[2]}
+ for i in self.graph.edges.data()
+ ],
+ crs=self.lines_gdf.crs,
+ )
@property
- def graph(self) -> Graph:
- if self._graph is None:
- links_data = []
-
- # place first and last point if we have set tolerance
- def add_link(
- node_from, point_from, node_to, point_to, geometry, links_data
- ):
- if self.tolerance is not None:
- geometry = LineString(
- [(point_from.x, point_from.y)]
- + geometry.coords[1:-1]
- + [(point_to.x, point_to.y)]
- )
-
- # add edge to graph
- self._graph.add_edge(
- node_from,
- node_to,
- length=geometry.length,
- geometry=geometry,
- )
+ def nodes(self) -> GeoDataFrame:
+ """Return graph nodes as GeoDataFrame"""
+ # make sure we have a graph
+ _ = self.graph
+ gdf = GeoDataFrame.from_dict(
+ {i[0]: i[1] for i in self.graph.nodes.data()},
+ orient="index",
+ crs=self.lines_gdf.crs,
+ )
+ gdf.index.name = "node_id"
+ return gdf
- # add node_from, node_to to links
- links_data += [
- {"node_from": node_from, "node_to": node_to, "geometry": geometry}
- ]
+ @property
+ def graph_undirected(self) -> Graph:
+ if self._graph_undirected is None:
+ self._graph_undirected = Graph(self.graph)
+ return self._graph_undirected
- self._graph = Graph()
+ @property
+ def graph(self) -> DiGraph:
+ if self._graph is None:
+ # see if input is valid
+ self.validate_inputs()
+ self._graph = DiGraph()
# add nodes to graph
- for row in self.nodes.itertuples(): # TODO: use feeding self.nodes as dict using self._graph.add_nodes_from may be faster
+ nodes_gdf = self.get_nodes()
+ for row in nodes_gdf.itertuples():
self._graph.add_node(row.Index, geometry=row.geometry)
+ # add edges using link_def
+ link_def = {}
for row in self.lines_gdf.itertuples():
geometry = row.geometry
+ # provide id and name attributes if any
+ if self.id_col is not None:
+ link_def["id"] = getattr(row, self.id_col)
+ if self.name_col is not None:
+ link_def["name"] = getattr(row, self.name_col)
+
# select nodes of interest
if self.tolerance:
bounds = box(*geometry.bounds).buffer(self.tolerance).bounds
else:
bounds = row.geometry.bounds
- nodes_select = self.nodes.iloc[self.nodes.sindex.intersection(bounds)]
+ nodes_select = nodes_gdf.iloc[nodes_gdf.sindex.intersection(bounds)]
if self.tolerance is None:
nodes_select = nodes_select[nodes_select.distance(geometry) == 0]
else:
@@ -135,58 +201,478 @@ def add_link(
continue
# More than one node. We order selected nodes by distance from start_node
- nodes_select["distance"] = nodes_select.distance(
- geometry.boundary.geoms[0]
+ nodes_select["distance"] = nodes_select.geometry.apply(
+ lambda x: geometry.project(x)
)
nodes_select.sort_values("distance", inplace=True)
# More than one node. We select start_node and point-geometry
- node_from = nodes_select.index[0]
- point_from = nodes_select.loc[node_from].geometry
+ link_def["node_from"] = nodes_select.index[0]
+ link_def["point_from"] = nodes_select.loc[
+ link_def["node_from"]
+ ].geometry
# More than two nodes. Line should be split into parts. We create one extra edge for every extra node
if len(nodes_select) > 2:
for node in nodes_select[1:-1].itertuples():
- node_to = node.Index
- point_to = nodes_select.loc[node_to].geometry
+ link_def["node_to"] = node.Index
+ link_def["point_to"] = nodes_select.loc[
+ link_def["node_to"]
+ ].geometry
edge_geometry, geometry = split(
- snap(geometry, point_to, self.snap_tolerance), point_to
+ snap(geometry, link_def["point_to"], self.snap_tolerance),
+ link_def["point_to"],
).geoms
- add_link(
- node_from,
- point_from,
- node_to,
- point_to,
- edge_geometry,
- links_data,
- )
- node_from = node_to
- point_from = point_to
+ link_def["geometry"] = edge_geometry
+ self.add_link(**link_def)
+ link_def["node_from"] = link_def["node_to"]
+ link_def["point_from"] = link_def["point_to"]
# More than one node. We finish the (last) edge
- node_to = nodes_select.index[-1]
- point_to = nodes_select.loc[node_to].geometry
- add_link(
- node_from,
- point_from,
- node_to,
- point_to,
- geometry,
- links_data,
- )
+ link_def["node_to"] = nodes_select.index[-1]
+ link_def["point_to"] = nodes_select.loc[link_def["node_to"]].geometry
+ link_def["geometry"] = geometry
+ self.add_link(**link_def)
+
+ # Set all node-types
+ self.set_node_types()
- self._links_gdf = GeoDataFrame(links_data, crs=self.lines_gdf.crs)
return self._graph
+ def add_link(
+ self,
+ node_from,
+ node_to,
+ geometry,
+ point_from=None,
+ point_to=None,
+ id=None,
+ name=None,
+ ):
+ """Add a link (edge) to the network"""
+ if self.tolerance is not None:
+ geometry = LineString(
+ [(point_from.x, point_from.y)]
+ + geometry.coords[1:-1]
+ + [(point_to.x, point_to.y)]
+ )
+
+ # add edge to graph
+ self._graph.add_edge(
+ node_from,
+ node_to,
+ name=name,
+ id=id,
+ length=geometry.length,
+ geometry=geometry,
+ )
+
+ def overlay(self, gdf):
+ cols = ["node_id"] + [i for i in gdf.columns if i != "geometry"]
+ gdf_overlay = gpd.overlay(self.nodes.reset_index(), gdf, how="intersection")[
+ cols
+ ]
+ gdf_overlay = gdf_overlay[~gdf_overlay.duplicated(subset="node_id")]
+
+ for row in gdf_overlay.itertuples():
+ attrs = {
+ k: v for k, v in row._asdict().items() if k not in ["Index", "node_id"]
+ }
+ for k, v in attrs.items():
+ self._graph.nodes[row.node_id][k] = v
+
+ self._graph_undirected = None
+
+ def upstream_nodes(self, node_id):
+ return [
+ n
+ for n in traversal.bfs_tree(self._graph, node_id, reverse=True)
+ if n != node_id
+ ]
+
+ def downstream_nodes(self, node_id):
+ return [n for n in traversal.bfs_tree(self._graph, node_id) if n != node_id]
+
+ def has_upstream_nodes(self, node_id):
+ return len(self.upstream_nodes(node_id)) > 0
+
+ def has_downstream_nodes(self, node_id):
+ return len(self.downstream_nodes(node_id)) > 0
+
+ def find_upstream(self, node_id, attribute, max_iters=10):
+ upstream_nodes = self.upstream_nodes(node_id)
+ max_iters = min(max_iters, len(upstream_nodes))
+ value = None
+ for idx in range(max_iters):
+ node = self._graph.nodes[upstream_nodes[idx]]
+ if attribute in node.keys():
+ if pd.notna(node[attribute]):
+ value = node[attribute]
+ break
+ return value
+
+ def find_downstream(self, node_id, attribute, max_iters=10):
+ downstream_nodes = self.downstream_nodes(node_id)
+ max_iters = min(max_iters, len(downstream_nodes))
+ value = None
+ for idx in range(max_iters):
+ node = self._graph.nodes[downstream_nodes[idx]]
+ if attribute in node.keys():
+ if pd.notna(node[attribute]):
+ value = node[attribute]
+ break
+ return value
+
+ def get_downstream(self, node_id, attribute, max_iters=5):
+ downstream_nodes = self.downstream_nodes(node_id)
+
+        # nodes are inspected pairwise, so trim the list to an even length
+        # and allow up to double max_iters
+ nbr_nodes = len(downstream_nodes)
+ if nbr_nodes % 2 == 1:
+ nbr_nodes -= 1
+ max_iters = min(nbr_nodes, max_iters * 2)
+
+ first_value = None
+ second_value = None
+
+ for idx in range(0, max_iters, 2):
+ first_node = self._graph.nodes[downstream_nodes[idx]]
+ second_node = self._graph.nodes[downstream_nodes[idx + 1]]
+
+ if attribute in first_node.keys():
+ if pd.notna(first_node[attribute]):
+ first_value = first_node[attribute]
+ if stop_iter(first_value, second_value):
+ break
+
+ if attribute in second_node.keys():
+                if pd.notna(second_node[attribute]):
+ second_value = second_node[attribute]
+ if stop_iter(first_value, second_value):
+ break
+
+ return first_value, second_value
+
+ def get_upstream_downstream(self, node_id, attribute, max_iters=5, max_length=2000):
+ # determine upstream and downstream nodes
+ upstream_nodes = self.upstream_nodes(node_id)
+ downstream_nodes = self.downstream_nodes(node_id)
+
+ max_iters = min(max_iters, len(upstream_nodes), len(downstream_nodes))
+
+ upstream_value = None
+ downstream_value = None
+
+ for idx in range(max_iters):
+ if (
+ nx.shortest_path_length(
+ self.graph,
+ upstream_nodes[idx],
+ downstream_nodes[idx],
+ weight="length",
+ )
+ > max_length
+ ):
+ break
+ us_node = self._graph.nodes[upstream_nodes[idx]]
+ ds_node = self._graph.nodes[downstream_nodes[idx]]
+
+ if attribute in us_node.keys():
+ if pd.notna(us_node[attribute]):
+ upstream_value = us_node[attribute]
+ if stop_iter(upstream_value, downstream_value):
+ break
+
+ if attribute in ds_node.keys():
+                if pd.notna(ds_node[attribute]):
+ downstream_value = ds_node[attribute]
+ if stop_iter(upstream_value, downstream_value):
+ break
+
+ if upstream_value == downstream_value:
+ return None, None
+ else:
+ return upstream_value, downstream_value
+
+ def move_node(
+ self,
+ point: Point,
+ max_distance: float,
+ align_distance: float,
+ node_types=["connection", "upstream_boundary", "downstream_boundary"],
+ ):
+        """Move a network node and its adjacent edges to a new location
+
+        Parameters
+        ----------
+        point : Point
+            Point to move the node to
+        max_distance : float
+            Max distance within which the closest node is searched
+        align_distance : float
+            Distance along the edge, measured from the node, within which
+            vertices are removed to align adjacent edges with Point
+        """
+ # take links and nodes as gdf
+ nodes_gdf = self.nodes
+ links_gdf = self.links
+
+ # get closest connection-node
+ distances = (
+ nodes_gdf[nodes_gdf["type"].isin(node_types)].distance(point).sort_values()
+ )
+ node_id = distances.index[0]
+ node_distance = distances.iloc[0]
+
+ # check if node is within max_distance
+ if node_distance <= max_distance:
+ # update graph node
+ self.graph.nodes[node_id]["geometry"] = point
+
+ # update start-node of edges
+ edges_from = links_gdf[links_gdf.node_from == node_id]
+ for edge in edges_from.itertuples():
+ geometry = edge.geometry
+
+ # take first node from point
+ coords = list(point.coords)
+
+                # keep intermediate vertices only if they lie farther than align_distance along the edge
+ for coord in list(
+ self.graph.edges[(edge.node_from, edge.node_to)]["geometry"].coords
+ )[1:-1]:
+ if geometry.project(Point(coord)) > align_distance:
+ coords += [coord]
+
+ # take the last from original geometry
+ coords += [geometry.coords[-1]]
+
+ self.graph.edges[(edge.node_from, edge.node_to)][
+ "geometry"
+ ] = LineString(coords)
+
+ # update end-node of edges
+            edges_to = links_gdf[links_gdf.node_to == node_id]
+            for edge in edges_to.itertuples():
+ geometry = edge.geometry
+
+ # take first from original geometry
+ coords = [geometry.coords[0]]
+
+                # keep intermediate vertices only if they lie farther than align_distance along the reversed edge
+ geometry = geometry.reverse()
+ for coord in list(
+ self.graph.edges[(edge.node_from, edge.node_to)]["geometry"].coords
+ )[1:-1]:
+ if geometry.project(Point(coord)) > align_distance:
+ coords += [coord]
+
+ # take the last from point
+ coords += [(point.x, point.y)]
+
+ self.graph.edges[(edge.node_from, edge.node_to)][
+ "geometry"
+ ] = LineString(coords)
+ return node_id
+ else:
+ logger.warning(
+ f"No Node moved. Closest node: {node_id}, distance > max_distance ({node_distance} > {max_distance})"
+ )
+ return None
+
+ def add_node(self, point: Point, max_distance: float, align_distance: float):
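+        """Add a node on the closest edge within max_distance by splitting that edge, then move it to point."""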
+        # invalidate the cached undirected graph
+        self._graph_undirected = None
+
+ # get links
+ links_gdf = self.links
+
+ # get closest edge and distances
+ distances = links_gdf.distance(point).sort_values()
+ edge_id = distances.index[0]
+ edge_distance = distances.iloc[0]
+ edge_geometry = links_gdf.at[edge_id, "geometry"] # noqa: PD008
+ node_from = links_gdf.at[edge_id, "node_from"] # noqa: PD008
+ node_to = links_gdf.at[edge_id, "node_to"] # noqa: PD008
+
+ if edge_distance <= max_distance:
+ # add node
+ node_id = max(self.graph.nodes) + 1
+ node_geometry = edge_geometry.interpolate(edge_geometry.project(point))
+ self.graph.add_node(node_id, geometry=node_geometry, type="connection")
+
+ # add edges
+ self.graph.remove_edge(node_from, node_to)
+ us_geometry, ds_geometry = split(
+ snap(edge_geometry, node_geometry, 0.01), node_geometry
+ ).geoms
+ self.add_link(node_from, node_id, us_geometry)
+ self.add_link(node_id, node_to, ds_geometry)
+
+            return self.move_node(
+                point, max_distance=max_distance, align_distance=align_distance
+            )
+ else:
+ logger.warning(
+ f"No Node added. Closest edge: {edge_id}, distance > max_distance ({edge_distance} > {max_distance})"
+ )
+ return None
+
def reset(self):
self._graph = None
- self._nodes_gdf = None
- self._links_gdf = None
+
+ def set_graph(self, graph: DiGraph):
+ """Set graph directly"""
+ self._graph = graph
+
+ def get_path(self, node_from, node_to, directed=True):
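+        """Get the shortest path (by length) between two nodes, falling back to the undirected graph if no directed path exists."""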
+ if directed:
+ try:
+ return shortest_path(self.graph, node_from, node_to, weight="length")
+ except NetworkXNoPath:
+ print(f"found path undirected between {node_from} and {node_to}")
+ return shortest_path(
+ self.graph_undirected, node_from, node_to, weight="length"
+ )
+ else:
+ return shortest_path(
+ self.graph_undirected, node_from, node_to, weight="length"
+ )
+
+ def get_links(self, node_from, node_to, directed=True):
+ path = self.get_path(node_from, node_to, directed)
+ edges_on_path = list(zip(path[:-1], path[1:]))
+ return self.links.set_index(["node_from", "node_to"]).loc[edges_on_path]
+
+ def subset_links(self, nodes_from, nodes_to):
+ gdf = pd.concat(
+ [
+ self.get_links(node_from, node_to)
+ for node_from, node_to in product(nodes_from, nodes_to)
+ ]
+ )
+ gdf = gdf.reset_index().drop_duplicates(["node_from", "node_to"]).reset_index()
+ return gdf
+
+ def subset_nodes(
+ self, nodes_from, nodes_to, inclusive=True, directed=True, duplicated_nodes=True
+ ):
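+        """Get nodes on the paths between nodes_from and nodes_to; by default only nodes shared by all paths."""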
+ def find_duplicates(lst, counts):
+ counter = Counter(lst)
+ duplicates = [item for item, count in counter.items() if count == counts]
+ return duplicates
+
+ paths = [
+ self.get_path(node_from, node_to, directed)
+ for node_from, node_to in product(nodes_from, nodes_to)
+ ]
+
+ node_ids = list(chain(*paths))
+ if duplicated_nodes:
+ node_ids = find_duplicates(node_ids, len(paths))
+ else:
+ node_ids = list(set(chain(*paths)))
+ if not inclusive:
+ exclude_nodes = nodes_from + nodes_to
+ node_ids = [i for i in node_ids if i not in exclude_nodes]
+ return self.nodes.loc[node_ids]
+
+ def _get_coordinates(self, node_from, node_to):
+ # get geometries from links
+ reverse = False
+ links = self.links
+ geometries = links.loc[
+ (links.node_from == node_from) & (links.node_to == node_to), ["geometry"]
+ ]
+ if geometries.empty:
+ geometries = links.loc[
+ (links.node_from == node_to) & (links.node_to == node_from),
+ ["geometry"],
+ ]
+ if not geometries.empty:
+ reverse = True
+ else:
+ raise ValueError(
+ f"{node_from}, {node_to} not valid start and end nodes in the network"
+ )
+
+        # select geometry; if multiple links connect the nodes, take the longest
+ if len(geometries) > 1:
+ idx = geometries.length.sort_values(ascending=False).index[0]
+ geometry = geometries.loc[idx].geometry
+ elif len(geometries) == 1:
+ geometry = geometries.iloc[0].geometry
+
+ # invert geometry
+ if reverse:
+ geometry = geometry.reverse()
+
+ return list(geometry.coords)
+
+ def path_to_line(self, path):
+ coords = []
+ for node_from, node_to in zip(path[0:-1], path[1:]):
+ coords += self._get_coordinates(node_from, node_to)
+ return LineString(coords)
+
+ def get_line(self, node_from, node_to, directed=True):
+ path = self.get_path(node_from, node_to, directed)
+ return self.path_to_line(path)
+
+ def get_nodes(self) -> GeoDataFrame:
+ """Get nodes from lines_gdf
+
+ Approach if tolerance is set:
+ 1. create polygons using buffer (tolerance/2)
+ 2. dissolving to a multipolygon using unary_union
+ 3. explode to individual polygons
+ 4. convert to points taking the centroid
+
+ Returns
+ -------
+ GeoDataFrame
+ GeoDataFrame with nodes and index
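+
+        Example
+        -------
+        A minimal sketch, assuming ``network`` is a Network built from a lines
+        GeoDataFrame:
+
+        >>> network.tolerance = 1
+        >>> nodes_gdf = network.get_nodes()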
+ """
+        # unique boundary-points of all lines are the node-candidates
+        geoseries = GeoSeries(
+            self.lines_gdf.boundary.explode(index_parts=True).unique(),
+            crs=self.lines_gdf.crs,
+        )
+
+        # snap nodes within tolerance if it's set, following the steps in the docstring
+        if self.tolerance is not None:
+            geoseries = (
+                GeoSeries([geoseries.buffer(self.tolerance / 2).unary_union])
+                .explode(index_parts=False)
+                .reset_index(drop=True)
+                .centroid
+            )
+
+ # make it a GeoDataFrame
+ nodes_gdf = GeoDataFrame(geometry=geoseries, crs=self.lines_gdf.crs)
+ # let's start index at 1 and name it node_id
+ nodes_gdf.index += 1
+ nodes_gdf.index.name = "node_id"
+ return nodes_gdf
+
+ def set_node_types(self):
+ """Node types to seperate boundaries from connections"""
+ from_nodes = {i[0] for i in self.graph.edges}
+ to_nodes = {i[1] for i in self.graph.edges}
+ us_boundaries = [i for i in from_nodes if i not in to_nodes]
+ ds_boundaries = [i for i in to_nodes if i not in from_nodes]
+ for node_id in self._graph.nodes:
+ if node_id in us_boundaries:
+ self._graph.nodes[node_id]["type"] = "upstream_boundary"
+ elif node_id in ds_boundaries:
+ self._graph.nodes[node_id]["type"] = "downstream_boundary"
+ else:
+                self._graph.nodes[node_id]["type"] = "connection"
+
def to_file(self, path: str | Path):
+ """Write output to geopackage"""
path = Path(path)
- # make sure graph is created
- _ = self.graph
+ if path.suffix.lower() != ".gpkg":
+ raise ValueError(
+ f"{path} is not a GeoPackage, please provide a file with extention 'gpkg'"
+ )
+
# write nodes and links
- self.nodes.to_file(path, layer="nodes")
- self.links.to_file(path, layer="links")
+ self.nodes.to_file(path, layer="nodes", engine="pyogrio")
+ self.links.to_file(path, layer="links", engine="pyogrio")
+ # add styles
+ add_styles_to_geopackage(path)
diff --git a/src/ribasim_nl/ribasim_nl/raster.py b/src/ribasim_nl/ribasim_nl/raster.py
new file mode 100644
index 0000000..9bb447a
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/raster.py
@@ -0,0 +1,177 @@
+import math
+from pathlib import Path
+
+import geopandas as gpd
+import numpy as np
+import rasterio
+from osgeo import gdal
+from pandas import DataFrame
+from rasterio import features # noqa:F401
+from rasterio.windows import from_bounds
+from shapely.geometry import LineString, Polygon
+
+DEFAULT_PERCENTILES = [
+ 0.01,
+ 0.1,
+ 1,
+ 5,
+ 10,
+ 20,
+ 30,
+ 40,
+ 50,
+ 60,
+ 70,
+ 80,
+ 90,
+ 95,
+ 99,
+ 99.9,
+ 99.99,
+ 100,
+]
+
+
+def build_vrt(raster_dir: Path):
+ """Build a vrt-file inside a directory of rasters.
+
+ Important notes!
+ 1. Only tif-files will be included
+    2. All rasters must share the same coordinate reference system and dtype (and preferably the same nodata-value)
+
+ Parameters
+ ----------
+ raster_dir : Path
+        Directory containing the tif-files to include in the vrt
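+
+    Example
+    -------
+    A minimal sketch with an assumed directory of tif-files:
+
+    >>> build_vrt(Path("bathymetrie"))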
+ """
+
+ rasters_vrt = raster_dir / f"{raster_dir.name}.vrt"
+ if rasters_vrt.exists():
+ rasters_vrt.unlink()
+ raster_files = [f"{i.as_posix()}" for i in raster_dir.glob("*.tif")]
+
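+    # create an empty target file via gdal's virtual file system so BuildVRT can write to it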
+ ds = gdal.VSIFOpenL(rasters_vrt.as_posix(), "w")
+ gdal.VSIFCloseL(ds)
+
+ vrt_ds = gdal.BuildVRT(rasters_vrt.as_posix(), raster_files)
+
+ for idx, raster_file in enumerate(raster_files):
+ file_name = Path(raster_file).name
+ subdataset_name = f"SUBDATASET_{idx}_NAME"
+ subdataset_description = f"SUBDATASET_{idx}_DESC"
+ vrt_ds.GetRasterBand(1).SetMetadataItem(subdataset_name, file_name)
+ vrt_ds.GetRasterBand(1).SetMetadataItem(
+ subdataset_description, f"File: {file_name}"
+ )
+
+ # Save the changes and close the VRT file
+ vrt_ds = None
+
+
+def sample_level_area(
+ raster_path: Path, polygon: Polygon, ident=None, percentiles=DEFAULT_PERCENTILES
+) -> DataFrame:
+    """Sample a level-area relation for a polygon from an elevation raster.
+
+    Levels are the given percentiles of the elevation data within the polygon;
+    the area per level is the number of cells at or below that level times the cell-area.
+    """
+
+ # Open raster and read window from polygon.bounds
+ with rasterio.open(raster_path) as src:
+ # Read the raster data within the specified window
+ window = from_bounds(*polygon.bounds, transform=src.transform)
+ profile = src.profile
+ window_data = src.read(1, window=window)
+ scales = src.scales
+ dx, dy = src.res
+ cell_area = dx * dy
+
+ # get actual value if data is scaled
+ if scales[0] != 1:
+ window_data = np.where(
+ window_data == profile["nodata"],
+ profile["nodata"],
+ window_data * scales[0],
+ )
+
+ # Get the affine transformation associated with the window
+ window_transform = src.window_transform(window)
+
+ # create a mask-array from polygon
+ mask = rasterio.features.geometry_mask(
+ [polygon], window_data.shape, window_transform, all_touched=False, invert=True
+ )
+
+    # exclude nodata-cells from the mask
+ mask[window_data == profile["nodata"]] = False
+
+ # compute levels by percentiles
+ level = np.percentile(window_data[mask], percentiles)
+
+ # compute areas by level and cell-area
+ area = [np.sum(mask & (window_data <= value)) * cell_area for value in level]
+
+ df = DataFrame({"percentiles": percentiles, "level": level, "area": area})
+
+ if ident is not None:
+ print(f"sampled polygon {ident}")
+ df["id"] = ident
+ return df
+
+
+def line_to_samples(
+ line: LineString, sample_dist: float, crs=28992
+) -> gpd.GeoDataFrame:
+ """Convert line to samples
+
+ Parameters
+ ----------
+ line : LineString
+ Input line
+ sample_dist : float
+ minimal distance along line
+ crs : int, optional
+ output projection, by default 28992
+
+ Returns
+ -------
+ gpd.GeoDataFrame
+ GeoDataFrame with Point geometry and distance along the line
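+
+    Example
+    -------
+    Sample a 100 m line with a 10 m minimal spacing:
+
+    >>> line = LineString([(0, 0), (100, 0)])
+    >>> gdf = line_to_samples(line, sample_dist=10)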
+ """
+    nbr_points = max(math.ceil(line.length / sample_dist), 2)  # at least start- and end-point
+ gdf = gpd.GeoDataFrame(
+ geometry=[
+ line.interpolate(i / float(nbr_points - 1), normalized=True)
+ for i in range(nbr_points)
+ ],
+ crs=crs,
+ )
+ gdf["distance"] = gdf.geometry.apply(lambda x: line.project(x))
+ return gdf
+
+
+def sample_elevation_distance(raster_path: Path, line: LineString) -> gpd.GeoDataFrame:
+ """Sample values over an elevation raster using a line
+
+ Parameters
+ ----------
+ raster_path : Path
+ Path to raster
+ line : LineString
+ LineString to sample at raster-resolution
+
+ Returns
+ -------
+ gpd.GeoDataFrame
+ GeoDataFrame with Point-geometry, distance along the line and elevation value
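+
+    Example
+    -------
+    A minimal sketch with an assumed raster (path and coordinates are illustrative):
+
+    >>> line = LineString([(155000, 463000), (156000, 463000)])
+    >>> gdf = sample_elevation_distance(Path("bathymetrie-nl.tif"), line)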
+ """
+
+ with rasterio.open(raster_path) as src:
+ sample_dist = abs(src.res[0])
+ gdf = line_to_samples(line, sample_dist)
+ coords = zip(gdf["geometry"].x, gdf["geometry"].y)
+ gdf["elevation"] = [i[0] for i in src.sample(coords)]
+
+ gdf = gdf[gdf.elevation != src.nodata]
+ # get actual value if data is scaled
+ if src.scales[0] != 1:
+ gdf["elevation"] = gdf["elevation"] * src.scales[0]
+
+ return gdf
diff --git a/src/ribasim_nl/ribasim_nl/rating_curve.py b/src/ribasim_nl/ribasim_nl/rating_curve.py
new file mode 100644
index 0000000..f8c5fff
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/rating_curve.py
@@ -0,0 +1,20 @@
+from pathlib import Path
+
+import pandas as pd
+from openpyxl import load_workbook
+from pandas import DataFrame, Series
+
+
+def read_rating_curve(file_path: Path, node_index: Series) -> DataFrame:
+ """Concat sheets in a verdeelsleutel.xlsx to 1 pandas dataframe."""
+ wb = load_workbook(file_path)
+ sheet_names = wb.sheetnames
+ dfs = []
+ for sheet_name in sheet_names:
+ if sheet_name != "disclaimer":
+ df = pd.read_excel(file_path, sheet_name=sheet_name)
+ df["code_waterbeheerder"] = sheet_name
+ df["node_id"] = node_index.loc[sheet_name]
+ dfs += [df]
+
+ return pd.concat(dfs)[["node_id", "level", "flow_rate", "code_waterbeheerder"]]
diff --git a/src/ribasim_nl/ribasim_nl/reset_index.py b/src/ribasim_nl/ribasim_nl/reset_index.py
new file mode 100644
index 0000000..bc6faca
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/reset_index.py
@@ -0,0 +1,46 @@
+import pandas as pd
+from ribasim import Model
+
+from ribasim_nl.model import TABLES, get_table
+
+
+def reset_index(model: Model, node_start=1):
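+    """Renumber all node-ids in the model to a contiguous range starting at node_start."""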
+ # only reset if we have to
+ fid_min = model.network.node.df.index.min()
+ fid_max = model.network.node.df.index.max()
+ expected_length = fid_max - fid_min + 1
+ if not (
+ (node_start == fid_min) and (expected_length == len(model.network.node.df))
+ ):
+        # copy the index to a fid-column so old node-ids can be mapped to the new index
+ node_ids = model.network.node.df.index
+ model.network.node.df.loc[:, "fid"] = node_ids
+
+ # create a new index for re-indexing all tables
+ index = pd.Series(
+ data=[i + node_start for i in range(len(node_ids))], index=node_ids
+ )
+
+ # re-index node_id and fid
+ model.network.node.df.index = model.network.node.df["fid"].apply(
+ lambda x: index.loc[x]
+ )
+ model.network.node.df.index.name = "fid"
+ model.network.node.df.drop(columns=["fid"], inplace=True)
+
+ # renumber edges
+ model.network.edge.df.loc[:, ["from_node_id"]] = model.network.edge.df[
+ "from_node_id"
+ ].apply(lambda x: index.loc[x])
+
+ model.network.edge.df.loc[:, ["to_node_id"]] = model.network.edge.df[
+ "to_node_id"
+ ].apply(lambda x: index.loc[x])
+
+ # renumber tables
+ for table in TABLES:
+ df = get_table(model, table)
+ if df is not None:
+ df.loc[:, ["node_id"]] = df["node_id"].apply(lambda x: index.loc[x])
+
+ return model
diff --git a/src/ribasim_nl/ribasim_nl/styles.py b/src/ribasim_nl/ribasim_nl/styles.py
new file mode 100644
index 0000000..f8d12fe
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/styles.py
@@ -0,0 +1,124 @@
+import re
+import sqlite3
+from datetime import datetime
+from pathlib import Path
+
+import fiona
+
+STYLES_DIR = Path(__file__).parent.joinpath("data", "styles")
+
+CREATE_TABLE_SQL = """
+CREATE TABLE "layer_styles" (
+ "id" INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
+ "f_table_catalog" TEXT(256),
+ "f_table_schema" TEXT(256),
+ "f_table_name" TEXT(256),
+ "f_geometry_column" TEXT(256),
+ "styleName" TEXT(30),
+ "styleQML" TEXT,
+ "styleSLD" TEXT,
+ "useAsDefault" BOOLEAN,
+ "description" TEXT,
+ "owner" TEXT(30),
+ "ui" TEXT(30),
+ "update_time" DATETIME DEFAULT (strftime('%Y-%m-%dT%H:%M:%fZ','now'))
+);
+"""
+DROP_TABLE_SQL = """DROP TABLE IF EXISTS "layer_styles";"""
+
+INSERT_ROW_SQL = """
+INSERT INTO "main"."layer_styles" (
+ "f_table_catalog",
+ "f_table_schema",
+ "f_table_name",
+ "f_geometry_column",
+ "styleName",
+ "styleQML",
+ "styleSLD",
+ "useAsDefault",
+ "description",
+ "owner",
+ "ui",
+ "update_time"
+)
+VALUES (
+ '',
+ '',
+ '{layer}',
+ 'geom',
+ '{layer}',
+ '{style_qml}',
+ '{style_sld}',
+ '1',
+ '{description}',
+ '',
+ '',
+ '{update_date_time}'
+);
+"""
+
+
+def read_style(style_path: Path) -> str:
+ """
+    Make style-text sql-compatible by replacing every single ' with ''.
+    Example: 'http://mrcc.com/qgis.dtd' -> ''http://mrcc.com/qgis.dtd''
+
+ Parameters
+ ----------
+ style_path : Path
+        Path to qml- or sld-file
+
+ Returns
+ -------
+ str
+ style-string for SQL
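+
+    Example
+    -------
+    A minimal sketch with an assumed style-file:
+
+    >>> style_txt = read_style(STYLES_DIR / "nodes.qml")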
+
+ """
+ style_txt = style_path.read_text()
+
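+    # re-wrap the content of every quoted substring: 'x' -> ''x''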
+ pattern = r"'(.*?)'"
+ style_txt = re.sub(pattern, lambda m: f"''{m.group(1)}''", style_txt)
+
+ return style_txt
+
+
+def add_styles_to_geopackage(gpkg_path: Path):
+ """
+ Add styles to a HyDAMO GeoPackage
+
+ Parameters
+ ----------
+ gpkg_path : Path
+ Path to HyDAMO GeoPackage
+
+ Returns
+ -------
+ None.
+
+ """
+
+ with sqlite3.connect(gpkg_path) as conn:
+ # create table
+ conn.execute(DROP_TABLE_SQL)
+ conn.execute(CREATE_TABLE_SQL)
+
+ # add style per layer
+ for layer in fiona.listlayers(gpkg_path):
+ style_qml = STYLES_DIR / f"{layer}.qml"
+ style_sld = STYLES_DIR / f"{layer}.sld"
+
+ # check if style exists
+ if style_qml.exists() and style_sld.exists():
+ description = f"HyDAMO style for layer: {layer}"
+ update_date_time = f"{datetime.now().isoformat()}Z"
+
+ # push to GeoPackage
+ conn.execute(
+ INSERT_ROW_SQL.format(
+ layer=layer,
+ style_qml=read_style(style_qml),
+ style_sld=read_style(style_sld),
+ description=description,
+ update_date_time=update_date_time,
+ )
+ )
diff --git a/src/ribasim_nl/ribasim_nl/tables.py b/src/ribasim_nl/ribasim_nl/tables.py
new file mode 100644
index 0000000..998b9d6
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/tables.py
@@ -0,0 +1,99 @@
+import pandas as pd
+from scipy import interpolate
+
+
+def average_width(df_left: pd.DataFrame, df_right: pd.DataFrame) -> pd.DataFrame:
+ """Combine two DataFrames with width(level) to one average width(level) relation
+
+ DataFrames should have columns 'level' and 'width'. Resulting DataFrame will contain all unique levels
+ and the average width of both dataframes. Widths will be linear interpolated if only in one of the two
+ DataFrames.
+
+ Parameters
+ ----------
+ df_left : pd.DataFrame
+ One DataFrame with width(level) relation
+ df_right : pd.DataFrame
+ Other DataFrame with width(level) relation
+
+ Returns
+ -------
+ pd.DataFrame
+ resulting DataFrame with width(level) relation
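+
+    Example
+    -------
+    Levels present in only one table get an interpolated or single-sided width
+    (values from the unit-test):
+
+    >>> df_left = pd.DataFrame({"level": [1, 2, 3, 6], "width": [10, 20, 30, 60]})
+    >>> df_right = pd.DataFrame({"level": [2, 4], "width": [20, 40]})
+    >>> average_width(df_left, df_right).width.to_list()
+    [10.0, 20.0, 30.0, 40.0, 60.0]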
+ """
+    # get unique levels, sorted so the positional fill from df_single below aligns
+    level = sorted(set(df_left["level"].to_list() + df_right["level"].to_list()))
+
+ f_left = interpolate.interp1d(
+ df_left["level"].to_numpy(), df_left["width"].to_numpy(), bounds_error=False
+ )
+ f_right = interpolate.interp1d(
+ df_right["level"].to_numpy(), df_right["width"].to_numpy(), bounds_error=False
+ )
+
+ width = (f_left(level) + f_right(level)) / 2
+
+ df = pd.DataFrame({"level": level, "width": width})
+
+    # where width is NaN, it's out of the bounds of left or right; replace it with the single-sided value
+ df_single = (
+ pd.concat([df_left, df_right])
+ .sort_values("level")
+ .drop_duplicates("level")
+ .reset_index(drop=True)
+ )
+ df[df.width.isna()] = df_single[df.width.isna()]
+
+ return df
+
+
+def cumulative_area(df: pd.DataFrame) -> pd.Series:
+ """Calculate the cumulative_area from a DataFrame with width(level).
+
+ Parameters
+ ----------
+ df : pd.DataFrame
+ DataFrame with level and width columns
+
+ Returns
+ -------
+ pd.Series
+ Series with cumulative area
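+
+    Example
+    -------
+    Using the combined width-table from ``average_width`` (values from the unit-test):
+
+    >>> df = pd.DataFrame(
+    ...     {"level": [1, 2, 3, 4, 6], "width": [10.0, 20.0, 30.0, 40.0, 60.0]}
+    ... )
+    >>> cumulative_area(df).to_list()
+    [0.0, 15.0, 40.0, 75.0, 175.0]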
+ """
+
+    df = df.sort_values("level").reset_index(drop=True)
+
+ dx = df["level"] - df["level"].shift(fill_value=0)
+ dy = df["width"] - df["width"].shift(fill_value=0)
+ area = (df["width"].shift(fill_value=0) * dx) + (dy * dx) / 2
+ area.loc[0] = 0
+ return area.cumsum()
+
+
+def manning_profile(df: pd.DataFrame) -> tuple[float, float]:
+ """Convert a DataFrame with a Width(level) relation to a manning profile_width and slope.
+
+ DataFrame should have columns 'level' and 'width'
+
+ Parameters
+ ----------
+ df : pd.DataFrame
+ DataFrame with level and width columns
+
+ Returns
+ -------
+    tuple[float, float]
+ Tuple with (profile_width, slope) values
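+
+    Example
+    -------
+    Using the same width-table (values from the unit-test):
+
+    >>> df = pd.DataFrame(
+    ...     {"level": [1, 2, 3, 4, 6], "width": [10.0, 20.0, 30.0, 40.0, 60.0]}
+    ... )
+    >>> manning_profile(df)
+    (10.0, 5.0)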
+ """
+
+ dz = df["level"].max() - df["level"].min()
+ tw = df["width"].max()
+
+ area = cumulative_area(df)
+ A = area.max()
+    bw = max(2 * A / dz - tw, 0)
+ dy = (tw - bw) / 2
+ S = dy / dz
+
+ return bw, S
diff --git a/src/ribasim_nl/ribasim_nl/verdeelsleutels.py b/src/ribasim_nl/ribasim_nl/verdeelsleutels.py
new file mode 100644
index 0000000..62f0569
--- /dev/null
+++ b/src/ribasim_nl/ribasim_nl/verdeelsleutels.py
@@ -0,0 +1,110 @@
+from pathlib import Path
+
+import pandas as pd
+import ribasim
+from openpyxl import load_workbook
+from pandas import DataFrame, Series
+
+from ribasim_nl.model import add_control_node_to_network
+
+
+def read_verdeelsleutel(file_path: Path) -> DataFrame:
+ """Concat sheets in a verdeelsleutel.xlsx to 1 pandas dataframe."""
+ wb = load_workbook(file_path)
+ sheet_names = wb.sheetnames
+ return pd.concat([pd.read_excel(file_path, sheet_name=i) for i in sheet_names])
+
+
+def verdeelsleutel_to_fractions(
+ verdeelsleutel_df: DataFrame, node_index: Series, keys: int = 2
+) -> DataFrame:
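+    """Reshape a verdeelsleutel DataFrame to a fractions-table with node_id, fraction and control_state."""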
+ df = pd.concat(
+ [
+ verdeelsleutel_df[
+ [f"locatie_benedenstrooms_{i}", f"fractie_{i}", "beschrijving"]
+ ].rename(
+ columns={
+ f"locatie_benedenstrooms_{i}": "locatie_benedenstrooms",
+ f"fractie_{i}": "fraction",
+ "beschrijving": "control_state",
+ }
+ )
+ for i in range(1, keys + 1)
+ ]
+ )
+
+ for code, node_id in zip(node_index.index, node_index):
+ df.loc[
+ df.locatie_benedenstrooms.str.lower() == code.lower(), "node_id"
+ ] = node_id
+
+ df.loc[:, "fraction"] = df["fraction"].round(3)
+ return df[["node_id", "fraction", "control_state"]]
+
+
+def verdeelsleutel_to_control(
+ verdeelsleutel_df,
+ model,
+ offset_node_id=None,
+ code_waterbeheerder=None,
+ waterbeheerder=None,
+):
+ keys = verdeelsleutel_df.locatie_bovenstrooms.unique()
+ frac_nodes = []
+ if len(keys) != 1:
+ raise ValueError(
+ f"number of keys in verdeelsleutel != 1: {verdeelsleutel_df.locatie_bovenstrooms.unique()}"
+ )
+ else:
+ listen_feature_id = (
+ model.network.node.df[
+ model.network.node.df["meta_code_waterbeheerder"] == keys[0]
+ ]
+ .iloc[0]
+ .meta_node_id
+ )
+
+ # get frac-nodes
+ for (loc1, loc2), df in verdeelsleutel_df.groupby(
+ ["locatie_benedenstrooms_1", "locatie_benedenstrooms_2"]
+ ):
+ frac_nodes += [
+ model.network.node.df[
+ (model.network.node.df["type"] == "FractionalFlow")
+ & (
+ model.network.node.df["meta_code_waterbeheerder"].str.lower()
+ == i.lower()
+ )
+ ]
+ .iloc[0]
+ .meta_node_id
+ for i in [loc1, loc2]
+ ]
+
+ ctrl_node_id = add_control_node_to_network(
+ model.network,
+ frac_nodes,
+ offset=100,
+ offset_node_id=offset_node_id,
+ meta_code_waterbeheerder=code_waterbeheerder,
+ meta_waterbeheerder=waterbeheerder,
+ )
+
+    # add discrete control
+ condition_df = df[["ondergrens_waarde", "beschrijving"]].rename(
+ columns={"ondergrens_waarde": "greater_than", "beschrijving": "remarks"}
+ )
+ condition_df["node_id"] = ctrl_node_id
+ condition_df["listen_feature_id"] = listen_feature_id
+ condition_df["variable"] = "flow_rate"
+
+ logic_df = df[["beschrijving"]].rename(columns={"beschrijving": "control_state"})
+ logic_df["truth_state"] = [
+ "".join(["T"] * i + ["F"] * len(df))[0 : len(df)] for i in range(len(df))
+ ]
+ logic_df["node_id"] = ctrl_node_id
+ model.discrete_control = ribasim.DiscreteControl(
+ logic=logic_df, condition=condition_df
+ )
+
+ return model
diff --git a/src/ribasim_nl/tests/data/osm_lines.gpkg b/src/ribasim_nl/tests/data/osm_lines.gpkg
new file mode 100644
index 0000000..cd40f71
Binary files /dev/null and b/src/ribasim_nl/tests/data/osm_lines.gpkg differ
diff --git a/src/ribasim_nl/tests/test_network.py b/src/ribasim_nl/tests/test_network.py
index d84a76a..3288c4a 100644
--- a/src/ribasim_nl/tests/test_network.py
+++ b/src/ribasim_nl/tests/test_network.py
@@ -1,9 +1,23 @@
+# %%
+from pathlib import Path
+
import geopandas as gpd
+import pytest
from ribasim_nl import Network
from shapely.geometry import LineString
-def test_network(tmp_path):
+@pytest.fixture
+def osm_lines_gpkg():
+ return Path(__file__).parent.joinpath("data", "osm_lines.gpkg")
+
+
+@pytest.fixture
+def edge_columns():
+ return ["node_from", "node_to", "name", "id", "length", "geometry"]
+
+
+def test_network(tmp_path, edge_columns):
lines_gdf = gpd.GeoDataFrame(
geometry=gpd.GeoSeries(
[
@@ -15,15 +29,15 @@ def test_network(tmp_path):
)
network = Network(lines_gdf)
- assert len(network.nodes) == len(network.graph.nodes) == 4
- assert len(network.links) == len(network.graph.edges) == 3
- assert all(i in ["geometry", "node_from", "node_to"] for i in network.links.columns)
+ assert len(network.graph.nodes) == 4
+ assert len(network.graph.edges) == 3
+ assert all(i in edge_columns for i in network.links.columns)
output_file = tmp_path / "network.gpkg"
network.to_file(output_file)
assert output_file.exists()
-def test_gap_in_network():
+def test_gap_in_network(edge_columns):
lines_gdf = gpd.GeoDataFrame(
geometry=gpd.GeoSeries(
[
@@ -36,17 +50,17 @@ def test_gap_in_network():
network = Network(lines_gdf)
# we'll find 5 nodes now, as there is a gap
- assert len(network.nodes) == len(network.graph.nodes) == 5
+ assert len(network.graph.nodes) == 5
# throw network away and regenerate it with tolerance
network.reset()
network.tolerance = 1
# we'll find 4 nodes now, as the gap is repaired
- assert len(network.nodes) == len(network.graph.nodes) == 4
- assert len(network.links) == len(network.graph.edges) == 3
+ assert len(network.graph.nodes) == 4
+ assert len(network.graph.edges) == 3
- assert all(i in ["geometry", "node_from", "node_to"] for i in network.links.columns)
+ assert all(i in edge_columns for i in network.links.columns)
def test_link_within_tolerance():
@@ -65,14 +79,14 @@ def test_link_within_tolerance():
# not snapping within tolerance should produce all links and nodes
network = Network(lines_gdf)
- assert len(network.links) == len(network.graph.edges) == 5
- assert len(network.nodes) == len(network.graph.nodes) == 6
+ assert len(network.graph.edges) == 5
+ assert len(network.graph.nodes) == 6
network.reset()
# tolerance >1m should remove link
network.tolerance = 1.1
- assert len(network.links) == len(network.graph.edges) == 4
- assert len(network.nodes) == len(network.graph.nodes) == 5
+ assert len(network.graph.edges) == 4
+ assert len(network.graph.nodes) == 5
def test_split_intersecting_links():
@@ -88,5 +102,15 @@ def test_split_intersecting_links():
network = Network(lines_gdf)
- assert len(network.links) == len(network.graph.edges) == 3
- assert len(network.nodes) == len(network.graph.nodes) == 4
+ assert len(network.graph.edges) == 3
+ assert len(network.graph.nodes) == 4
+
+
+def test_osm_lines(osm_lines_gpkg):
+ network = Network.from_lines_gpkg(osm_lines_gpkg)
+
+ assert len(network.graph.edges) == 42
+ assert len(network.graph.nodes) == 35
+
+
+# %%
diff --git a/src/ribasim_nl/tests/test_tables.py b/src/ribasim_nl/tests/test_tables.py
new file mode 100644
index 0000000..93bc783
--- /dev/null
+++ b/src/ribasim_nl/tests/test_tables.py
@@ -0,0 +1,20 @@
+import pandas as pd
+from ribasim_nl.tables import average_width, cumulative_area, manning_profile
+
+
+def test_interpolation_simple():
+ df_left = pd.DataFrame({"level": [1, 2, 3, 6], "width": [10, 20, 30, 60]})
+ df_right = pd.DataFrame({"level": [2, 4], "width": [20, 40]})
+
+ # combine two width-tables
+ df = average_width(df_left, df_right)
+ assert df.width.to_list() == [10.0, 20.0, 30.0, 40.0, 60.0]
+
+ # compute cumulative areas
+ area = cumulative_area(df)
+ assert area.to_list() == [0.0, 15.0, 40.0, 75.0, 175.0]
+
+ # calculate manning_profile
+ profile_width, profile_slope = manning_profile(df)
+ assert profile_width == 10.0
+ assert profile_slope == 5.0