diff --git a/setup.py b/setup.py
index 4a0f0243a..d6aab97dc 100644
--- a/setup.py
+++ b/setup.py
@@ -35,7 +35,9 @@
         "InplaceVolumes = webviz_subsurface.containers:InplaceVolumes",
         "InplaceVolumesOneByOne = webviz_subsurface.containers:InplaceVolumesOneByOne",
         "ReservoirSimulationTimeSeries = "
-        "webviz_subsurface.containers:ReservoirSimulationTimeSeries",
+        "webviz_subsurface.containers:ReservoirSimulationTimeSeries",
+        "ReservoirSimulationTimeSeriesOneByOne = "
+        "webviz_subsurface.containers:ReservoirSimulationTimeSeriesOneByOne",
     ]
 },
 install_requires=[
diff --git a/webviz_subsurface/containers/__init__.py b/webviz_subsurface/containers/__init__.py
index be0f589e2..b4709d749 100644
--- a/webviz_subsurface/containers/__init__.py
+++ b/webviz_subsurface/containers/__init__.py
@@ -33,6 +33,9 @@
 from ._inplace_volumes import InplaceVolumes
 from ._inplace_volumes_onebyone import InplaceVolumesOneByOne
 from ._reservoir_simulation_timeseries import ReservoirSimulationTimeSeries
+from ._reservoir_simulation_timeseries_onebyone import (
+    ReservoirSimulationTimeSeriesOneByOne,
+)
 
 
 __all__ = [
@@ -46,4 +49,5 @@
     "InplaceVolumes",
     "InplaceVolumesOneByOne",
     "ReservoirSimulationTimeSeries",
+    "ReservoirSimulationTimeSeriesOneByOne",
 ]
diff --git a/webviz_subsurface/containers/_reservoir_simulation_timeseries_onebyone.py b/webviz_subsurface/containers/_reservoir_simulation_timeseries_onebyone.py
new file mode 100644
index 000000000..b6387fb79
--- /dev/null
+++ b/webviz_subsurface/containers/_reservoir_simulation_timeseries_onebyone.py
@@ -0,0 +1,337 @@
+from uuid import uuid4
+import json
+
+import numpy as np
+import pandas as pd
+
+from dash.exceptions import PreventUpdate
+from dash.dependencies import Input, Output, State
+import dash_html_components as html
+import dash_core_components as dcc
+from dash_table import DataTable
+
+import webviz_core_components as wcc
+from webviz_config import WebvizContainerABC
+from webviz_config.common_cache import CACHE
+from webviz_subsurface.private_containers._tornado_plot import TornadoPlot
+
+from ..datainput import load_smry, get_realizations
+
+
+class ReservoirSimulationTimeSeriesOneByOne(WebvizContainerABC):
+    """### ReservoirSimulationTimeSeriesOneByOne
+
+Visualizes reservoir simulation time series for ensembles generated from a design matrix.
+A tornado plot can be calculated interactively for each date/vector.
+The realizations for each sensitivity can be highlighted.
+
+* `ensembles`: Which ensembles in `container_settings` to visualize.
+* `column_keys`: List of vectors to extract. If not given, all vectors
+  from the simulations will be extracted. The wild card asterisk *
+  can be used.
+* `initial_vector`: Initial vector to display.
+* `sampling`: Time separation between extracted values. Can be e.g. `monthly`
+  or `yearly`.
+"""
+
+    ENSEMBLE_COLUMNS = [
+        "REAL",
+        "ENSEMBLE",
+        "DATE",
+        "SENSCASE",
+        "SENSNAME",
+        "SENSTYPE",
+        "RUNPATH",
+    ]
+
+    TABLE_STAT = [
+        "Sens Name",
+        "Sens Case",
+        "Mean",
+        "Stddev",
+        "Minimum",
+        "P90",
+        "P10",
+        "Maximum",
+    ]
+
+    def __init__(
+        self,
+        app,
+        container_settings,
+        ensembles,
+        column_keys=None,
+        initial_vector=None,
+        sampling: str = "monthly",
+    ):
+
+        self.time_index = sampling
+        self.column_keys = tuple(column_keys) if column_keys else None
+        self.ens_paths = tuple(
+            (ensemble, container_settings["scratch_ensembles"][ensemble])
+            for ensemble in ensembles
+        )
+        # Extract realizations and sensitivity information
+        realizations = get_realizations(
+            ensemble_paths=self.ens_paths, ensemble_set_name="EnsembleSet"
+        )
+        smry = load_smry(
+            ensemble_paths=self.ens_paths,
+            ensemble_set_name="EnsembleSet",
+            time_index=self.time_index,
+            column_keys=self.column_keys,
+        )
+
+        self.data = pd.merge(smry, realizations, on=["ENSEMBLE", "REAL"])
+        self.smry_cols = [
+            c
+            for c in self.data.columns
+            if c not in ReservoirSimulationTimeSeriesOneByOne.ENSEMBLE_COLUMNS
+        ]
+        self.initial_vector = (
+            initial_vector
+            if initial_vector and initial_vector in self.smry_cols
+            else self.smry_cols[0]
+        )
+        self.tornadoplot = TornadoPlot(app, realizations, allow_click=True)
+
+        self.make_uuids()
+
+        self.set_callbacks(app)
+
+    def make_uuids(self):
+        uuid = f"{uuid4()}"
+        self.smry_col_id = f"smry-col-{uuid}"
+        self.date_id = f"date-{uuid}"
+        self.date_label = f"date-label{uuid}"
+        self.ensemble_id = f"ensemble-{uuid}"
+        self.table_id = f"table-{uuid}"
+        self.graph_id = f"graph-{uuid}"
+        self.graph_wrapper_id = f"graph-wrapper-{uuid}"
+
+    @property
+    def ensemble_selector(self):
+        """Dropdown to select ensemble"""
+        return html.Div(
+            style={"paddingBottom": "30px"},
+            children=[
+                html.Label("Ensemble"),
+                dcc.Dropdown(
+                    id=self.ensemble_id,
+                    options=[
+                        {"label": i, "value": i}
+                        for i in list(self.data["ENSEMBLE"].unique())
+                    ],
+                    clearable=False,
+                    value=list(self.data["ENSEMBLE"])[0],
+                ),
+            ],
+        )
+
+    @property
+    def smry_selector(self):
+        """Dropdown to select vector"""
+        return html.Div(
+            style={"paddingBottom": "30px"},
+            children=[
+                html.Label("Vector"),
+                dcc.Dropdown(
+                    id=self.smry_col_id,
+                    options=[{"label": i, "value": i} for i in self.smry_cols],
+                    clearable=False,
+                    value=self.initial_vector,
+                ),
+            ],
+        )
+
+    def add_webvizstore(self):
+        return [
+            (
+                load_smry,
+                [
+                    {
+                        "ensemble_paths": self.ens_paths,
+                        "ensemble_set_name": "EnsembleSet",
+                        "time_index": self.time_index,
+                        "column_keys": self.column_keys,
+                    }
+                ],
+            ),
+            (
+                get_realizations,
+                [
+                    {
+                        "ensemble_paths": self.ens_paths,
+                        "ensemble_set_name": "EnsembleSet",
+                    }
+                ],
+            ),
+        ]
+
+    @staticmethod
+    def set_grid_layout(columns):
+        return {
+            "display": "grid",
+            "alignContent": "space-around",
+            "justifyContent": "space-between",
+            "gridTemplateColumns": f"{columns}",
+        }
+
+    @property
+    def layout(self):
+        return html.Div(
+            style=self.set_grid_layout("3fr 1fr"),
+            children=[
+                html.Div(
+                    [
+                        html.Div(
+                            style=self.set_grid_layout("1fr 1fr 1fr"),
+                            children=[
+                                self.ensemble_selector,
+                                self.smry_selector,
+                                html.Label(id=self.date_label),
+                            ],
+                        ),
+                        html.Div(
+                            [
+                                html.Div(
+                                    id=self.graph_wrapper_id,
+                                    style={"height": "450px"},
+                                    children=wcc.Graph(id=self.graph_id),
+                                ),
+                                DataTable(
+                                    id=self.table_id,
+                                    sort_action="native",
+                                    filter_action="native",
+                                    page_action="native",
+                                    page_size=10,
+                                    columns=[
+                                        {"name": i, "id": i}
+                                        for i in ReservoirSimulationTimeSeriesOneByOne.TABLE_STAT
+                                    ],
+                                ),
+                            ]
+                        ),
+                    ]
+                ),
+                self.tornadoplot.layout,
+            ],
+        )
+
+    def set_callbacks(self, app):
+        @app.callback(
+            Output(self.graph_wrapper_id, "children"),
+            [Input(self.ensemble_id, "value"), Input(self.smry_col_id, "value")],
+        )
+        def _render_lines(ensemble, vector):
+            """Callback to update the time series graph"""
+
+            # Filter dataframe based on dropdown choices
+            data = filter_ensemble(self.data, ensemble, vector)
+            traces = [
+                {
+                    "type": "line",
+                    "marker": {"color": "grey"},
+                    "hoverinfo": "skip",
+                    "x": df["DATE"],
+                    "y": df[vector],
+                    "customdata": r,
+                }
+                for r, df in data.groupby(["REAL"])
+            ]
+            traces[0]["hoverinfo"] = "x"
+            return [
+                wcc.Graph(
+                    id=self.graph_id,
+                    figure={
+                        "data": traces,
+                        "layout": {
+                            "title": "Click on a date to calculate tornado plot. "
+                            + "Click on a bar in the tornado plot to highlight relevant realizations",
+                            "showlegend": False,
+                        },
+                    },
+                )
+            ]
+
+        @app.callback(
+            [
+                Output(self.date_label, "children"),
+                Output(self.table_id, "data"),
+                Output(self.tornadoplot.storage_id, "children"),
+            ],
+            [
+                Input(self.ensemble_id, "value"),
+                Input(self.graph_id, "clickData"),
+                Input(self.smry_col_id, "value"),
+            ],
+        )
+        def _render_tornado(ensemble, clickdata, vector):
+            try:
+                date = clickdata["points"][0]["x"]
+            except TypeError:
+                raise PreventUpdate
+            data = filter_ensemble(self.data, ensemble, vector)
+            data = data.loc[data["DATE"].astype(str) == date]
+            table_rows = calculate_table_rows(data, vector)
+            return (
+                f"Selected data {date}",
+                table_rows,
+                json.dumps(
+                    {
+                        "ENSEMBLE": ensemble,
+                        "data": data[["REAL", vector]].values.tolist(),
+                    }
+                ),
+            )
+
+        @app.callback(
+            Output(self.graph_id, "figure"),
+            [Input(self.tornadoplot.click_id, "children")],
+            [State(self.graph_id, "figure")],
+        )
+        def _highlight_realizations(clickdata, figure):
+            if not clickdata:
+                return figure
+            clickdata = json.loads(clickdata)
+            for trace in figure["data"]:
+                if trace["customdata"] in clickdata["real_low"]:
+                    trace["marker"] = {"color": "rgb(235, 0, 54)"}
+                    trace["opacity"] = 1
+                elif trace["customdata"] in clickdata["real_high"]:
+                    trace["marker"] = {"color": "rgb(36, 55, 70)"}
+                    trace["opacity"] = 1
+                else:
+                    trace["marker"] = {"color": "grey"}
+                    trace["opacity"] = 0.02
+            figure["layout"]["title"] = ""
+            return figure
+
+
+def calculate_table_rows(df, vector):
+    table = []
+    for (sensname, senscase), dframe in df.groupby(["SENSNAME", "SENSCASE"]):
+        values = dframe[vector]
+        try:
+            table.append(
+                {
+                    "Sens Name": str(sensname),
+                    "Sens Case": str(senscase),
+                    "Minimum": f"{values.min():.2e}",
+                    "Maximum": f"{values.max():.2e}",
+                    "Mean": f"{values.mean():.2e}",
+                    "Stddev": f"{values.std():.2e}",
+                    "P10": f"{np.percentile(values, 90):.2e}",
+                    "P90": f"{np.percentile(values, 10):.2e}",
+                }
+            )
+        except KeyError:
+            pass
+    return table
+
+
+@CACHE.memoize(timeout=CACHE.TIMEOUT)
+def filter_ensemble(data, ensemble, vector):
+    return data.loc[data["ENSEMBLE"] == ensemble][
+        ["DATE", "REAL", vector, "SENSCASE", "SENSNAME"]
+    ]
diff --git a/webviz_subsurface/datainput/__init__.py b/webviz_subsurface/datainput/__init__.py
index c10ce36df..864e3aac1 100644
--- a/webviz_subsurface/datainput/__init__.py
+++ b/webviz_subsurface/datainput/__init__.py
@@ -7,7 +7,7 @@
 from ._history_match import extract_mismatch, scratch_ensemble
 from ._intersect import load_surface, get_wfence, get_hfence
 from ._inplace_volumes import extract_volumes
-from ._fmu_input import load_parameters, get_realizations, find_surfaces
+from ._fmu_input import load_parameters, get_realizations, find_surfaces, load_smry
 from ._reservoir_simulation_timeseries import (
     get_time_series_data,
     get_time_series_statistics,
@@ -16,6 +16,7 @@
     get_time_series_delta_ens_stats,
 )
 
+
 __all__ = [
     "scratch_ensemble",
     "extract_mismatch",
@@ -29,6 +30,7 @@
     "get_time_series_delta_ens",
     "get_time_series_delta_ens_stats",
     "load_parameters",
+    "load_smry",
     "get_realizations",
     "find_surfaces",
 ]
diff --git a/webviz_subsurface/datainput/_fmu_input.py b/webviz_subsurface/datainput/_fmu_input.py
index 93b3b7753..16229c1c5 100644
--- a/webviz_subsurface/datainput/_fmu_input.py
+++ b/webviz_subsurface/datainput/_fmu_input.py
@@ -27,6 +27,20 @@ def load_parameters(
     return load_ensemble_set(ensemble_paths, ensemble_set_name).parameters
 
 
+@CACHE.memoize(timeout=CACHE.TIMEOUT)
+@webvizstore
+def load_smry(
+    ensemble_paths: tuple,
+    ensemble_set_name: str = "EnsembleSet",
+    time_index: str = "monthly",
+    column_keys: tuple = None,
+) -> pd.DataFrame:
+
+    return load_ensemble_set(ensemble_paths, ensemble_set_name).get_smry(
+        time_index=time_index, column_keys=list(column_keys) if column_keys else None
+    )
+
+
 @CACHE.memoize(timeout=CACHE.TIMEOUT)
 @webvizstore
 def get_realizations(
diff --git a/webviz_subsurface/private_containers/_tornado_plot.py b/webviz_subsurface/private_containers/_tornado_plot.py
index c036da8d8..bb4d3111b 100644
--- a/webviz_subsurface/private_containers/_tornado_plot.py
+++ b/webviz_subsurface/private_containers/_tornado_plot.py
@@ -234,7 +234,7 @@ def tornado_plot(
 
     # Group by sensitivity name/case and calculate average values for each case
     arr = []
-    for sens_name, dframe in realizations.groupby(["SENSNAME"]):
+    for sens_name, sens_name_df in realizations.groupby(["SENSNAME"]):
         # Excluding the reference case as well as any cases named `ref`
         # `ref` is used as `SENSNAME`, typically for a single realization only,
         # when no seed uncertainty is used
@@ -242,9 +242,11 @@
             continue
 
         # If `SENSTYPE` is scalar grab the mean for each `SENSCASE`
-        if dframe["SENSTYPE"].all() == "scalar":
-            for sens_case, dframe2 in dframe.groupby(["SENSCASE"]):
-                values = data.loc[data["REAL"].isin(dframe2["REAL"])]["VALUE"].mean()
+        if sens_name_df["SENSTYPE"].all() == "scalar":
+            for sens_case, sens_case_df in sens_name_df.groupby(["SENSCASE"]):
+                values = data.loc[data["REAL"].isin(sens_case_df["REAL"])][
+                    "VALUE"
+                ].mean()
 
                 arr.append(
                     {
@@ -252,20 +254,31 @@
                         "senscase": sens_case,
                         "values": values,
                         "values_ref": scale_to_ref(values, ref_avg, scale),
-                        "reals": list(dframe["REAL"]),
+                        "reals": list(sens_case_df["REAL"]),
                     }
                 )
 
         # If `SENSTYPE` is monte carlo get p10, p90
-        elif dframe["SENSTYPE"].all() == "mc":
-            p90 = data.loc[data["REAL"].isin(dframe["REAL"])]["VALUE"].quantile(0.10)
-            p10 = data.loc[data["REAL"].isin(dframe["REAL"])]["VALUE"].quantile(0.90)
+        elif sens_name_df["SENSTYPE"].all() == "mc":
+            # Get data for relevant realizations
+            case_df = data.loc[data["REAL"].isin(sens_name_df["REAL"])]
+
+            # Calculate p90 (low) and p10 (high)
+            p90 = case_df["VALUE"].quantile(0.10)
+            p10 = case_df["VALUE"].quantile(0.90)
+
+            # Extract list of realizations with values lower than the reference average (low)
+            low_reals = list(case_df.loc[case_df["VALUE"] <= ref_avg]["REAL"])
+
+            # Extract list of realizations with values higher than the reference average (high)
+            high_reals = list(case_df.loc[case_df["VALUE"] > ref_avg]["REAL"])
+
             arr.append(
                 {
                     "sensname": sens_name,
                     "senscase": "p90",
                     "values": p90,
                     "values_ref": scale_to_ref(p90, ref_avg, scale),
-                    "reals": list(dframe["REAL"]),
+                    "reals": low_reals,
                 }
             )
@@ -274,7 +287,7 @@
                     "senscase": "p10",
                     "values": p10,
                     "values_ref": scale_to_ref(p10, ref_avg, scale),
-                    "reals": list(dframe["REAL"]),
+                    "reals": high_reals,
                 }
             )
         else:
@@ -285,10 +298,9 @@
 
     # Group by sensitivity name and calculate low / high values
     arr2 = []
-    for sensname, dframe in pd.DataFrame(arr).groupby(["sensname"]):
-        low = dframe.loc[dframe["values_ref"].idxmin()]
-        high = dframe.loc[dframe["values_ref"].idxmax()]
-
+    for sensname, sens_name_df in pd.DataFrame(arr).groupby(["sensname"]):
+        low = sens_name_df.loc[sens_name_df["values_ref"].idxmin()]
+        high = sens_name_df.loc[sens_name_df["values_ref"].idxmax()]
         arr2.append(
             {
                 "low": low["values_ref"] if low["values_ref"] < 0 else 0,
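Not part of the patch itself, but a quick illustration of the percentile convention used by both `calculate_table_rows` and the `mc` branch of `tornado_plot` above: the "P10" column/case is the high outcome (the 90th percentile of the values) and "P90" is the low outcome (the 10th percentile), following the usual reservoir-engineering convention. A minimal, self-contained sketch with made-up values:

    import numpy as np
    import pandas as pd

    # Ten made-up realization values for a single date/vector
    values = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0])

    p10 = np.percentile(values, 90)  # high case, shown in the "P10" column
    p90 = np.percentile(values, 10)  # low case, shown in the "P90" column

    # Equivalent to the quantile calls used for "mc" sensitivities in tornado_plot
    assert np.isclose(p90, values.quantile(0.10))
    assert np.isclose(p10, values.quantile(0.90))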