diff --git a/openquake/cat/completeness/analysis.py b/openquake/cat/completeness/analysis.py
index 61705507e..77b61e0ff 100644
--- a/openquake/cat/completeness/analysis.py
+++ b/openquake/cat/completeness/analysis.py
@@ -177,7 +177,8 @@ def check_criterion(criterion, rate, previous_norm, tvars):
elif criterion == 'optimize':
tmp_rate = -1
- norm = get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, info=False)
+ norm = get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per,
+ last_year, info=False)
elif criterion == 'optimize_a':
tmp_rate = -1
@@ -309,20 +310,10 @@ def _completeness_analysis(fname, years, mags, binw, ref_mag, ref_upp_mag,
print(f'Iteration: {iper:05d} norm: {norm:12.6e}', end="\r")
ctab = _make_ctab(prm, years, mags)
+ print(ctab)
if isinstance(ctab, str):
continue
- #tmp = []
- #for yea, j in zip(years, prm):
- # if j >= -1e-10:
- # tmp.append([yea, mags[int(j)]])
- #tmp = np.array(tmp)
-
- #if len(tmp) > 0:
- # ctab = clean_completeness(tmp)
- #else:
- # continue
-
# Check compatibility between catalogue and completeness table. This
# function finds in each magnitude interval defined in the completeness
# table the earliest year since the occurrence of a number X of
@@ -467,8 +458,42 @@ def _completeness_analysis(fname, years, mags, binw, ref_mag, ref_upp_mag,
return save
+def read_compl_params(config):
+ """
+ """
+
+ # Read parameters for completeness analysis
+ key = 'completeness'
+ ms = np.array(config[key]['mags'], dtype=float)
+ yrs = np.array(config[key]['years'])
+ bw = config.get('bin_width', 0.1)
+ r_m = config[key].get('ref_mag', 5.0)
+ r_up_m = config[key].get('ref_upp_mag', None)
+ bmin = config[key].get('bmin', 0.8)
+ bmax = config[key].get('bmax', 1.2)
+ # Options: 'largest_rate', 'match_rate', 'optimize'
+ crit = config[key].get('optimization_criterion', 'optimize')
+
+ return ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit
+
-def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs,
+def read_compl_data(folder_in):
+ """
+ """
+ # Reading completeness data
+ print(f'Reading completeness data from: {folder_in:s}')
+ fname_disp = os.path.join(folder_in, 'dispositions.npy')
+ perms = np.load(fname_disp)
+ mags_chk = np.load(os.path.join(folder_in, 'mags.npy'))
+ years_chk = np.load(os.path.join(folder_in, 'years.npy'))
+ compl_tables = {'perms': perms, 'mags_chk': mags_chk,
+ 'years_chk': years_chk}
+
+ return compl_tables, mags_chk, years_chk
+
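+# The two readers above are meant to be used together; a minimal sketch
+# (hypothetical paths):
+#   config = toml.load('config/compl.toml')
+#   ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config)
+#   compl_tables, mags_chk, years_chk = read_compl_data('completeness/')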
+
+def completeness_analysis(fname_input_pattern, f_config, folder_out_figs,
folder_in, folder_out, skip=''):
"""
:param fname_input_pattern:
@@ -486,36 +511,18 @@ def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs,
"""
# Loading configuration
- config = toml.load(fname_config)
+ config = toml.load(f_config)
- # Read parameters for completeness analysis
- key = 'completeness'
- mags = np.array(config[key]['mags'])
- years = np.array(config[key]['years'])
- binw = config.get('bin_width', 0.1)
- ref_mag = config[key].get('ref_mag', 5.0)
- ref_upp_mag = config[key].get('ref_upp_mag', None)
- bmin = config[key].get('bmin', 0.8)
- bmax = config[key].get('bmax', 1.2)
- # Options: 'largest_rate', 'match_rate', 'optimize'
- criterion = config[key].get('optimization_criterion', 'optimize')
- print(criterion)
+ ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config)
- # Reading completeness data
- print(f'Reading completeness data from: {folder_in:s}')
- fname_disp = os.path.join(folder_in, 'dispositions.npy')
- perms = np.load(fname_disp)
- mags_chk = np.load(os.path.join(folder_in, 'mags.npy'))
- years_chk = np.load(os.path.join(folder_in, 'years.npy'))
- compl_tables = {'perms': perms, 'mags_chk': mags_chk,
- 'years_chk': years_chk}
+ compl_tables, mags_chk, years_chk = read_compl_data(folder_in)
# Fixing sorting of years
- if np.all(np.diff(years)) >= 0:
- years = np.flipud(years)
+    if np.all(np.diff(yrs) >= 0):
+ yrs = np.flipud(yrs)
- np.testing.assert_array_equal(mags, mags_chk)
- np.testing.assert_array_equal(years, years_chk)
+ np.testing.assert_array_almost_equal(ms, mags_chk)
+ np.testing.assert_array_almost_equal(yrs, years_chk)
# Info
if len(skip) > 0:
@@ -537,8 +544,8 @@ def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs,
else:
var = {}
- res = _completeness_analysis(fname, years, mags, binw, ref_mag,
- ref_upp_mag, [bmin, bmax], criterion,
+ res = _completeness_analysis(fname, yrs, ms, bw, r_m,
+ r_up_m, [bmin, bmax], crit,
compl_tables, src_id,
folder_out_figs=folder_out_figs,
folder_out=folder_out,
@@ -563,6 +570,6 @@ def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs,
# Updating configuration
config['sources'][src_id] = var
- with open(fname_config, 'w', encoding='utf-8') as fou:
+ with open(f_config, 'w', encoding='utf-8') as fou:
fou.write(toml.dumps(config))
- print(f'Updated {fname_config:s}')
+ print(f'Updated {f_config:s}')
diff --git a/openquake/cat/completeness/generate.py b/openquake/cat/completeness/generate.py
index f3b6dffd2..e98b82de2 100755
--- a/openquake/cat/completeness/generate.py
+++ b/openquake/cat/completeness/generate.py
@@ -50,21 +50,22 @@ def get_completenesses(fname_config, folder_out):
create_folder(folder_out)
config = toml.load(fname_config)
key = 'completeness'
- mags = np.array(config[key]['mags'])
+ mags = np.array(config[key]['mags'], dtype=np.float32)
years = np.array(config[key]['years'])
num_steps = config[key].get('num_steps', 0)
min_mag_compl = config[key].get('min_mag_compl', None)
apriori_conditions = config[key].get('apriori_conditions', {})
cref = config[key].get('completeness_ref', None)
step = config[key].get('step', 8)
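+    # maximum allowed deviation (in magnitude units) from the reference
+    # completeness table; permutations exceeding it are discarded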
+    mrange = config[key].get('deviation', 1.0)
_get_completenesses(mags, years, folder_out, num_steps, min_mag_compl,
- apriori_conditions, cref, step)
+ apriori_conditions, cref, step, mrange)
def _get_completenesses(mags, years, folder_out=None, num_steps=0,
min_mag_compl=None, apriori_conditions={},
- completeness_ref=None, step=6):
+ completeness_ref=None, step=6, mrange=1.0):
"""
:param mags:
A list or numpy array in increasing order
@@ -194,13 +195,7 @@ def _get_completenesses(mags, years, folder_out=None, num_steps=0,
mags_ref = [c[1] for c in completeness_ref]
rem = []
for iper, prm in enumerate(perms):
- #tmp = []
- #for yea, j in zip(years, prm):
- # if j >= -1e-10:
- # tmp.append([yea, mags[int(j)]])
-#
-# tmp = np.array(tmp)
-# ctab = clean_completeness(tmp)
+
ctab = _make_ctab(prm, years, mags)
for yr, mg in ctab:
if not yr in years_ref:
@@ -208,7 +203,7 @@ def _get_completenesses(mags, years, folder_out=None, num_steps=0,
continue
index = years_ref.index(yr)
mdiff = abs(mags_ref[index] - mg)
- if mdiff > 1:
+ if mdiff > mrange:
rem.append(iper)
continue
diff --git a/openquake/cat/completeness/mfd_eval_plots.py b/openquake/cat/completeness/mfd_eval_plots.py
new file mode 100644
index 000000000..4c4a84be6
--- /dev/null
+++ b/openquake/cat/completeness/mfd_eval_plots.py
@@ -0,0 +1,397 @@
+import os
+import pandas as pd
+import numpy as np
+from glob import glob
+from openquake.baselib import sap
+from matplotlib import pyplot as plt
+from openquake.cat.completeness.analysis import read_compl_data, _make_ctab
+from openquake.hazardlib.mfd import TruncatedGRMFD
+import matplotlib.colors as mcolors
+from matplotlib.patches import Ellipse
+
+def _read_results(results_dir):
+ fils = glob(os.path.join(results_dir, 'full*'))
+    print(f'Total instances: {len(fils)}')
+ # avals, bvals, weis = [], [], []
+ dfs = []
+ for ii, fi in enumerate(fils):
+        if ii % 50 == 0:
+ print(f'reading instance {ii}')
+ df = pd.read_csv(fi)
+ idx = fi.split('_')[-1].replace('.csv', '')
+ df['idx'] = [idx] * len(df)
+ dfs.append(df)
+
+ df_all = pd.concat(dfs, ignore_index=True)
+
+ mags, rates = [], []
+ cm_rates = []
+ for ii in range(len(df_all)):
+ row = df_all.iloc[ii]
+ mags.append([float(m) for m in row.mags[1:-1].split(', ')])
+ rate = [float(m) for m in row.rates[1:-1].split(', ')]
+ rates.append(rate)
+ cm_rates.append([sum(rate[ii:]) for ii in range(len(rate))])
+
+ df_all['mags'] = mags
+ df_all['rates'] = rates
+ df_all['cm_rates'] = cm_rates
+
+ return df_all
+
+def _get_best_results(df_all):
+
+ dfs = []
+ for ii, idx in enumerate(set(df_all.idx)):
+ df_sub = df_all[df_all.idx == idx]
+ df_subB = df_sub[df_sub.norm == min(df_sub.norm)]
+
+ dfs.append(df_subB)
+
+ df_best = pd.concat(dfs, ignore_index=True)
+
+ return df_best
+
+def _make_a_b_histos(df_all, df_best, figsdir):
+ fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(10,5))
+ binsA = np.arange(min(df_all.agr), max(df_all.agr), 0.1)
+ binsB = np.arange(min(df_all.bgr), max(df_all.bgr), 0.02)
+ num_cats = len(set(df_all.idx))
+
+ color = 'tab:grey'
+ ax[0].set_xlabel('a-value')
+ ax[0].set_ylabel('Count, all', color=color)
+ ax[0].tick_params(axis='y', labelcolor=color)
+
+ ax[1].set_xlabel('b-value')
+ ax[1].set_ylabel('Count, all', color=color)
+ ax[1].tick_params(axis='y', labelcolor=color)
+
+ for ii, idx in enumerate(set(df_all.idx)):
+
+ df_sub = df_all[df_all.idx == idx]
+ if num_cats < 10:
+ alpha = 0.1
+ else:
+ alpha = 10/num_cats
+
+ ax[0].hist(df_sub.agr, bins=binsA, color='gray', alpha=alpha)
+ ax[1].hist(df_sub.bgr, bins=binsB, color='gray', alpha=alpha)
+ ax2a = ax[0].twinx()
+ ax2b = ax[1].twinx()
+ ax2a.hist(df_best.agr, bins=binsA, color='red', alpha=0.2)
+ ax2b.hist(df_best.bgr, bins=binsB, color='red', alpha=0.2)
+
+ color = 'tab:red'
+ ax2a.set_ylabel('Count, best', color=color)
+ ax2a.tick_params(axis='y', labelcolor=color)
+ ax2b.set_ylabel('Count, best', color=color)
+ ax2b.tick_params(axis='y', labelcolor=color)
+
+ figname = os.path.join(figsdir, 'a-b-histos.png')
+ fig.savefig(figname, dpi=300)
+ plt.close(fig)
+
+def plt_compl_tables(compdir, figdir, df_best):
+ ctabids = df_best.id.values
+ compl_tables, mags_chk, years_chk = read_compl_data(compdir)
+
+ yrs, mgs = [],[]
+ for cid in ctabids:
+ ctab = _make_ctab(compl_tables['perms'][int(cid)],
+ years_chk, mags_chk)
+
+ # add first
+ plt.plot(ctab[0][0], ctab[0][1], 'ko', alpha=0.03)
+ plt.plot([ctab[0][0], ctab[0][0]+10], [ctab[0][1], ctab[0][1]], 'r--', alpha=0.03)
+ yrs.append(ctab[0][0])
+ mgs.append(ctab[0][1])
+
+ for ii in range(len(ctab)-1):
+ plt.plot([ctab[ii][0], ctab[ii+1][0]], [ctab[ii+1][1], ctab[ii+1][1]], 'r', alpha=0.03)
+ plt.plot([ctab[ii][0], ctab[ii][0]], [ctab[ii][1], ctab[ii+1][1]], 'r', alpha=0.03)
+ plt.plot(ctab[ii+1][0], ctab[ii+1][1], 'ko', alpha=0.03)
+
+ yrs.append(ctab[ii+1][0])
+ mgs.append(ctab[ii+1][1])
+
+ plt.title('Completeness tables: Best results/catalogue')
+ plt.xlabel('Year')
+ plt.ylabel('Lower magnitude threshold')
+ fout1 = os.path.join(figdir, 'completeness_v1.png')
+ plt.savefig(fout1, dpi=300)
+ plt.close()
+
+ plt.hist2d(yrs, mgs)
+ plt.colorbar(label='Count')
+ fout2 = os.path.join(figdir, 'completeness_v2.png')
+ plt.savefig(fout2, dpi=300)
+ plt.close()
+
+def plt_a_b_density(df, figsdir, figname, weights=None, density=True):
+ plt.hist2d(df.agr, df.bgr, bins=(10,10), weights=weights)
+ plt.colorbar(label='Count')
+ fout = os.path.join(figsdir, figname)
+ plt.savefig(fout, dpi=300)
+ plt.close()
+
+def get_top_percent(df_all, fraction):
+ min_norm = min(df_all.norm)
+ max_norm = max(df_all.norm)
+ thresh = abs(max_norm - min_norm) * fraction
+ df_thresh = df_all[df_all.norm <= min_norm + thresh]
+
+ return df_thresh
+
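+# e.g. fraction=0.2 keeps the solutions whose norm lies within 20% of the
+# full norm range above the minimum (used for the "20percent" plots below).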
+
+def plot_best_mfds(df_best, figsdir):
+ num = len(df_best)
+ for ii in range(len(df_best)):
+ row = df_best.iloc[ii]
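+        # GR MFD rebuilt over a fixed magnitude range (Mmin=4.0, Mmax=8.5,
+        # binw=0.2) from this catalogue's best-fitting a and b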
+ mfd = TruncatedGRMFD(4, 8.5, 0.2, df_best.agr.iloc[ii], df_best.bgr.iloc[ii])
+ mgrts = mfd.get_annual_occurrence_rates()
+ mfd_m = [m[0] for m in mgrts]
+ mfd_r = [m[1] for m in mgrts]
+ mfd_cr = [sum(mfd_r[ii:]) for ii in range(len(mfd_r))]
+ if ii == 0:
+ plt.scatter(row.mags, row.rates, marker='_', color='r',
+ label='Incremental occurrence')
+ plt.scatter(row.mags, row.cm_rates, marker='.', color='b',
+ label='Cumulative occurrence')
+ plt.semilogy(mfd_m, mfd_r, color='r', linewidth=0.1,
+ zorder=0, label='Incremental MFD')
+ plt.semilogy(mfd_m, mfd_cr, color='b',
+ linewidth=0.1, zorder=0, label='Cumulative MFD')
+
+ else:
+ if num <= 10:
+ alpha1 = 0.1
+ else:
+ alpha1 = 10/num
+
+ plt.scatter(row.mags, row.rates, marker='_', color='r',
+ alpha=alpha1)
+ plt.scatter(row.mags, row.cm_rates, marker='.', color='b',
+ alpha=alpha1)
+ plt.semilogy(mfd_m, mfd_r, color='r', alpha=3*alpha1, linewidth=0.1,
+ zorder=0)
+ plt.semilogy(mfd_m, mfd_cr, color='b', alpha=5*alpha1,
+ linewidth=0.1, zorder=0)
+
+ plt.xlabel('Magnitude')
+ plt.ylabel('Annual occurrence rates')
+ plt.legend()
+ fout = os.path.join(figsdir, 'mfds_best.png')
+ plt.savefig(fout, dpi=300)
+ plt.close()
+
+# Function to calculate normalized histogram for a group
+def norm_histo(group, field='rates', bins=10):
+ counts, bin_edges = np.histogram(group[field], bins=bins, density=True)
+ # Normalize counts to ensure the area under the histogram equals 1
+ bin_widths = np.diff(bin_edges)
+ normalized_counts = counts * bin_widths
+ bin_centers = [0.5*(bin_edges[ii] + bin_edges[ii+1]) for ii in range(len(bin_edges)-1)]
+ alpha = (normalized_counts - min(normalized_counts)) / (max(normalized_counts) - min(normalized_counts))
+
+ return bin_centers, alpha
+
+def weighted_mean(values, weights):
+ return np.sum(values * weights) / np.sum(weights)
+
+def weighted_covariance(x, y, weights):
+ mean_x = weighted_mean(x, weights)
+ mean_y = weighted_mean(y, weights)
+ cov_xy = np.sum(weights * (x - mean_x) * (y - mean_y)) / np.sum(weights)
+ cov_xx = np.sum(weights * (x - mean_x)**2) / np.sum(weights)
+ cov_yy = np.sum(weights * (y - mean_y)**2) / np.sum(weights)
+ return np.array([[cov_xx, cov_xy], [cov_xy, cov_yy]])
+
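+# The eigendecomposition of this 2x2 weighted covariance of (a, b) gives the
+# orientation and axis lengths of the uncertainty ellipse drawn below.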
+
+def plot_weighted_covariance_ellipse(df, figdir, n_std=1.0,
+ figname='a-b-covariance.png'):
+
+ # set up data
+ x = df.agr
+ y = df.bgr
+ wei = 1-df.norm
+ weights = (wei - min(wei)) / (max(wei) - min(wei))
+
+ # set up plot
+ fig, ax = plt.subplots()
+ hb = ax.hexbin(x, y, gridsize=20, cmap='Blues')
+ cb = fig.colorbar(hb, ax=ax)
+ cb.set_label('counts')
+
+ # get covariance
+ cov_matrix = weighted_covariance(x, y, weights)
+ eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix)
+
+ # Sort the eigenvalues and eigenvectors
+ order = eigenvalues.argsort()[::-1]
+ eigenvalues, eigenvectors = eigenvalues[order], eigenvectors[:, order]
+
+ # Get the index of the largest eigenvalue
+ largest_eigvec = eigenvectors[:, 0]
+
+ angle = np.degrees(np.arctan2(largest_eigvec[1], largest_eigvec[0]))
+ width, height = 2 * n_std * np.sqrt(eigenvalues)
+
+ ellipse = Ellipse(xy=(weighted_mean(x, weights), weighted_mean(y, weights)),
+ width=width, height=height, angle=angle,
+ facecolor='none', edgecolor='red')
+
+ ax.add_patch(ellipse)
+
+ angle_rad = np.radians(angle) # angle computed during the ellipse plotting
+
+ center_x = weighted_mean(x, weights)
+ center_y = weighted_mean(y, weights)
+
+ a = np.sqrt(eigenvalues[0]) # Length of semi-major axis
+ b = np.sqrt(eigenvalues[1]) # Length of semi-minor axis
+
+ # For the semi-major axis (a)
+ major_x1 = center_x + a * np.cos(angle_rad)
+ major_y1 = center_y + a * np.sin(angle_rad)
+
+ major_x2 = center_x - a * np.cos(angle_rad)
+ major_y2 = center_y - a * np.sin(angle_rad)
+
+
+ ax.scatter(center_x, center_y, c='white', marker='s', edgecolors='red')
+ ax.scatter(major_x1, major_y1, c='white', marker='o', edgecolors='red')
+ ax.scatter(major_x2, major_y2, c='white', marker='o', edgecolors='red')
+
+ color = 'red'
+ ax.text(0.02, 0.98, f'a = {np.round(major_x1, 3)}, b = {np.round(major_y1, 3)}',
+ transform=ax.transAxes, verticalalignment='top', horizontalalignment='left',
+ fontsize=12, color=color)
+ ax.text(0.02, 0.94, f'a = {np.round(center_x, 3)}, b = {np.round(center_y, 3)}',
+ transform=ax.transAxes, verticalalignment='top', horizontalalignment='left',
+ fontsize=12, color=color)
+ ax.text(0.02, 0.90, f'a = {np.round(major_x2, 3)}, b = {np.round(major_y2, 3)}',
+ transform=ax.transAxes, verticalalignment='top', horizontalalignment='left',
+ fontsize=12, color=color)
+
+
+ ax.set_xlabel('a-value', fontsize=12)
+ ax.set_ylabel('b-value', fontsize=12)
+ ax.set_title(figname.replace('.png', ''))
+
+ fout = os.path.join(figdir, figname)
+ plt.savefig(fout, dpi=300)
+ plt.close()
+
+ return center_x, center_y, major_x1, major_y1, major_x2, major_y2
+
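+# The three (a, b) pairs returned above are the weighted center and the two
+# ends of the 1-sigma semi-major axis; make_all_plots labels them
+# '<label>-center', '<label>-low' and '<label>-high'.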
+
+
+def plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=10, bw=0.2, figname=None):
+    # Group the flattened data by magnitude and compute a normalized
+    # histogram per magnitude group
+
+ fl_mags = [item for sublist in df_all.mags.values for item in sublist]
+ fl_rates = [item for sublist in df_all.rates.values for item in sublist]
+ fl_crates = [item for sublist in df_all.cm_rates.values for item in sublist]
+ fl_df = pd.DataFrame({'mags': fl_mags, 'rates': fl_rates, 'cm_rates': fl_crates})
+
+ grouped = fl_df.groupby('mags')
+ hist_data = grouped.apply(lambda g: norm_histo(g, field=field, bins=bins))
+    mags = hist_data.index.values
+ results = hist_data.values
+
+ all_alpha = []
+ for mag, rat in zip(mags, results):
+ m = [mag] * len(rat[0])
+ nmc = rat[1]
+ all_alpha.extend(nmc)
+ alph_min = min(all_alpha)
+ alph_max = max(all_alpha)
+
+ norm = mcolors.Normalize(vmin=alph_min, vmax=alph_max)
+
+ # Choose a colormap
+ colormap = plt.cm.Purples
+
+ for mag, rat in zip(mags, results):
+ m = [mag] * len(rat[0])
+ alph = rat[1]
+        alph[alph < 0.1] = 0.2
+ plt.semilogy([m[0], m[0]], [min(rat[0]), max(rat[0])], c='gray', linewidth=1, zorder=1)
+ plt.scatter(m, rat[0], 250, marker='_', c=alph, cmap=colormap, norm=norm, zorder=0)
+
+ for index, row in df_best.iterrows():
+ plt.scatter(row['mags'], row[field], 2, 'k', marker='s')
+ mfd = TruncatedGRMFD(min(mags)-bw, 8.5, bw, row.agr, row.bgr)
+ mgrts = mfd.get_annual_occurrence_rates()
+ mfd_m = [m[0] for m in mgrts]
+ mfd_r = [m[1] for m in mgrts]
+        if len(df_best) <= 30:
+            alpha = 0.1
+        else:
+            alpha = 30 / len(df_best)
+
+ if field == 'cm_rates':
+ mfd_cr = [sum(mfd_r[ii:]) for ii in range(len(mfd_r))]
+ plt.semilogy(mfd_m, mfd_cr, color='maroon', linewidth=0.2, zorder=10, alpha=alpha)
+ else:
+ plt.semilogy(mfd_m, mfd_r, color='maroon', linewidth=0.2, zorder=10, alpha=alpha)
+
+    if figname is None:
+ figname = f'mfds_all_{field}.png'
+
+ fout = os.path.join(figsdir, figname)
+
+
+ plt.xlabel('Magnitude')
+    plt.ylabel('Annual occurrence rate')
+ plt.title(figname.replace('.png', ''))
+ plt.savefig(fout)
+ plt.close()
+
+
+def make_all_plots(resdir_base, compdir, figsdir_base, labels):
+ agrs, bgrs, labs = [], [], []
+ for label in labels:
+ print(f'Running for {label}')
+ resdir = os.path.join(resdir_base, label)
+ figsdir = os.path.join(figsdir_base, label)
+ print('getting all results')
+ df_all = _read_results(resdir)
+ print('getting best results')
+ df_best = _get_best_results(df_all)
+ print('making histograms')
+ _make_a_b_histos(df_all, df_best, figsdir)
+ print('plotting completeness')
+ plt_compl_tables(compdir, figsdir, df_best)
+ print('plotting 2d histo')
+ plt_a_b_density(df_best, figsdir, 'a-b-density_best.png')
+ plt_a_b_density(df_all, figsdir, 'a-b-density_all.png')
+ df_thresh = get_top_percent(df_all, 0.2)
+ plt_a_b_density(df_thresh, figsdir, 'a-b-density_20percent.png')
+ nm = df_thresh.norm
+ nm_weight = (nm- min(nm))/(max(nm)-min(nm))
+ plt_a_b_density(df_thresh, figsdir, 'a-b-density_20percent_w.png',
+ weights = nm_weight)
+ print('plotting mfds')
+ plot_best_mfds(df_best, figsdir)
+ plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=60)
+ plot_all_mfds(df_all, df_best, figsdir, field='cm_rates', bins=60)
+ plot_all_mfds(df_best, df_best, figsdir, field='rates', bins=60,
+ figname='mfds_best_rates.png')
+ plot_all_mfds(df_best, df_best, figsdir, field='cm_rates', bins=60,
+ figname='mfds_best_cmrates.png')
+ plot_all_mfds(df_all, df_thresh, figsdir, field='rates', bins=60,
+ figname='mfds_thresh_rates.png')
+ plot_all_mfds(df_all, df_thresh, figsdir, field='cm_rates', bins=60,
+ figname='mfds_thresh_cmrates.png')
+ print('plotting covariance')
+ cx, cy, mx1, my1, mx2, my2 = plot_weighted_covariance_ellipse(df_best, figsdir)
+ plot_weighted_covariance_ellipse(df_thresh, figsdir, figname=f'{label}-20percent.png')
+
+ labs.extend([f'{label}-center', f'{label}-low', f'{label}-high'])
+ agrs.extend([cx, mx1, mx2])
+ bgrs.extend([cy, my1, my2])
+
+ return labs, np.round(agrs, 3), np.round(bgrs, 3)
diff --git a/openquake/cat/completeness/norms.py b/openquake/cat/completeness/norms.py
index b721c047b..9c1d18fe6 100644
--- a/openquake/cat/completeness/norms.py
+++ b/openquake/cat/completeness/norms.py
@@ -98,7 +98,8 @@ def get_completeness_matrix(tcat, ctab, mbinw, ybinw):
return oin, out, cmags, cyeas
-def get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, info=False):
+def get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, last_year,
+ info=False):
"""
:param aval:
GR a-value
diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py
new file mode 100644
index 000000000..dbc45a9e5
--- /dev/null
+++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py
@@ -0,0 +1,48 @@
+# ------------------- The OpenQuake Model Building Toolkit --------------------
+# Copyright (C) 2022 GEM Foundation
+# _______ _______ __ __ _______ _______ ___ _
+# | || | | |_| || _ || || | | |
+# | _ || _ | ____ | || |_| ||_ _|| |_| |
+# | | | || | | ||____|| || | | | | _|
+# | |_| || |_| | | || _ | | | | |_
+# | || | | ||_|| || |_| | | | | _ |
+# |_______||____||_| |_| |_||_______| |___| |___| |_|
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+# -----------------------------------------------------------------------------
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding: utf-8
+
+import toml
+import numpy as np
+from openquake.baselib import sap
+
+from openquake.mbt.tools.mfd_sample.make_mfds import make_many_mfds
+
+
+def main(fname_config):
+ """
+    Runs the workflow that samples the magnitude sigma of each event to
+    create many catalogues, declusters each of them based on its TRT, runs
+    a completeness analysis that jointly solves for the MFD, and finally
+    returns three MFDs based on the center and 1-sigma extremes of the
+    a-b covariance.
+ """
+ make_many_mfds(fname_config)
+
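+# Minimal usage sketch: sap builds the CLI from the signature above, so the
+# script is run as, e.g.,
+#   python MFDs_sample_mag_sigma.py config.toml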
+
+msg = 'Name of the .toml file with configuration parameters'
+main.fname_config = msg
+
+if __name__ == '__main__':
+ sap.run(main)
diff --git a/openquake/mbi/ccl/change_class.py b/openquake/mbi/ccl/change_class.py
new file mode 100644
index 000000000..a98d67f87
--- /dev/null
+++ b/openquake/mbi/ccl/change_class.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python
+# ------------------- The OpenQuake Model Building Toolkit --------------------
+# Copyright (C) 2022 GEM Foundation
+# _______ _______ __ __ _______ _______ ___ _
+# | || | | |_| || _ || || | | |
+# | _ || _ | ____ | || |_| ||_ _|| |_| |
+# | | | || | | ||____|| || | | | | _|
+# | |_| || |_| | | || _ | | | | |_
+# | || | | ||_|| || |_| | | | | _ |
+# |_______||____||_| |_| |_||_______| |___| |___| |_|
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+# -----------------------------------------------------------------------------
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# coding: utf-8
+
+from openquake.mbt.tools.tr.change_class import change
+
+from openquake.baselib import sap
+
+def main(cat_pickle_filename, treg, eqlist):
+
+ change(cat_pickle_filename, treg, eqlist)
+
+
+msg = 'Pickled catalogue file'
+main.cat_pickle_filename = msg
+msg = 'hdf5 file with classifications'
+main.treg = msg
+msg = 'File of format ,'
+main.eqlist = msg
+
+if __name__ == "__main__":
+ sap.run(main)
diff --git a/openquake/mbi/ccl/classify.py b/openquake/mbi/ccl/classify.py
index 554bbde85..d7e5bf7b8 100644
--- a/openquake/mbi/ccl/classify.py
+++ b/openquake/mbi/ccl/classify.py
@@ -157,6 +157,11 @@ def classify(ini_fname, compute_distances, rf=''):
if 'upp_mag' in config[key]:
upp_mag = float(config[key]['upp_mag'])
+ # specifying surface type
+ surftype = 'ComplexFault'
+ if 'surface_type' in config[key]:
+ surftype = config[key]['surface_type']
+
#
sse = SetSubductionEarthquakes(trlab,
treg_filename,
@@ -171,7 +176,7 @@ def classify(ini_fname, compute_distances, rf=''):
upp_year,
low_mag,
upp_mag)
- sse.classify(compute_distances, remove_from)
+ sse.classify(compute_distances, remove_from, surftype)
# crustal earthquakes
elif re.search('^crustal', key) or re.search('^volcanic', key):
diff --git a/openquake/mbi/ccl/decluster_multiple_TR.py b/openquake/mbi/ccl/decluster_multiple_TR.py
index a471260a7..c357ebc65 100755
--- a/openquake/mbi/ccl/decluster_multiple_TR.py
+++ b/openquake/mbi/ccl/decluster_multiple_TR.py
@@ -67,6 +67,7 @@ def main(config_fname, *, root=None):
fname_cat = os.path.join(root, config['main']['catalogue'])
fname_reg = os.path.join(root, config['main']['tr_file'])
fname_out = os.path.join(root, config['main']['output'])
+
create_sc = config['main']['create_subcatalogues']
save_afrs = config['main']['save_aftershocks']
add_deflt = config['main']['catalogue_add_defaults']
@@ -97,7 +98,6 @@ def main(config_fname, *, root=None):
fname_reg, create_sc, 'csv', meth[2], save_afrs,
cat_lab, add_deflt)
-
MSG = 'Path to the configuration fname - typically a .toml file'
main.config_fname = MSG
MSG = 'Root folder used as a reference for paths [pwd]'
diff --git a/openquake/mbi/sub/build_complex_surface.py b/openquake/mbi/sub/build_complex_surface.py
index 0ed8c2382..5463cd29d 100755
--- a/openquake/mbi/sub/build_complex_surface.py
+++ b/openquake/mbi/sub/build_complex_surface.py
@@ -2,6 +2,7 @@
# coding: utf-8
import os
+import sys
from openquake.baselib import sap
from openquake.sub.build_complex_surface import build_complex_surface
diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py
index 148701ff2..f16fbae74 100755
--- a/openquake/mbi/sub/create_2pt5_model.py
+++ b/openquake/mbi/sub/create_2pt5_model.py
@@ -32,7 +32,6 @@
from openquake.sub.create_2pt5_model import create_2pt5_model
from openquake.hazardlib.geo.geodetic import distance
-
def main(in_path, out_path, *, maximum_sampling_distance=25.):
"""
From a set of profiles it creates the top surface of the slab
diff --git a/openquake/mbi/sub/plot_geometries.py b/openquake/mbi/sub/plot_geometries.py
index cf220aba4..d3b34f3b1 100755
--- a/openquake/mbi/sub/plot_geometries.py
+++ b/openquake/mbi/sub/plot_geometries.py
@@ -13,6 +13,7 @@
try:
import pyvista as pv
+ PLOT = True
except:
PLOT = False
diff --git a/openquake/mbt/tests/tools/mfd_sample/config/compl.toml b/openquake/mbt/tests/tools/mfd_sample/config/compl.toml
new file mode 100644
index 000000000..19d35583d
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/config/compl.toml
@@ -0,0 +1,21 @@
+name = "CAM test"
+mmin = 4.0
+bin_width = 0.2
+rupture_mesh_spacing = 2.5
+kernel_maximum_distance = 120.0
+kernel_smoothing = [ [ 0.8, 20.0,], [ 0.15, 40.0,], [ 0.05, 80.0,],]
+source_prefix = "L01"
+
+[completeness]
+mags = []
+years = []
+completeness_ref = [ [ 1980.0, 4.0,], [ 1964.0, 5.0,], [ 1904.0, 7.0,],]
+deviation = 1.7
+ref_mag = 2.0
+ref_upp_mag = 10.0
+min_mag_compl = 4.0
+bmin = 0.7
+bmax = 1.3
+flexible = true
+num_steps = 2
+optimization_criterion = "optimize"
diff --git a/openquake/mbt/tests/tools/mfd_sample/config/dec.toml b/openquake/mbt/tests/tools/mfd_sample/config/dec.toml
new file mode 100644
index 000000000..ecaf037ba
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/config/dec.toml
@@ -0,0 +1,16 @@
+[main]
+
+catalogue = './data2/sub_cat.pkl'
+tr_file = './data2/classified_test.hdf5'
+
+create_subcatalogues = 'true'
+catalogue_add_defaults = 'true'
+
+[method1]
+name = 'GardnerKnopoffType1'
+params = {'time_distance_window' = 'UhrhammerWindow', 'fs_time_prop' = 0.1}
+label = 'UH'
+
+[case1]
+regions = ['crustal', 'intA']
+label = 'int_cru'
diff --git a/openquake/mbt/tests/tools/mfd_sample/config/test.toml b/openquake/mbt/tests/tools/mfd_sample/config/test.toml
new file mode 100644
index 000000000..f2ba733cc
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/config/test.toml
@@ -0,0 +1,23 @@
+[main]
+catalogue_filename = './data2/sub_cat.pkl'
+output_directory = 'test'
+
+[catalogues]
+number = 3
+sampling_type = 'random'
+create_catalogues = true
+
+[decluster]
+decluster_catalogues = true
+decluster_settings = 'config/dec.toml'
+
+[completeness]
+generate_completeness = true
+completeness_settings = 'config/compl.toml'
+
+[mfds]
+create_mfds = true
+labels = ['intA','crustal']
+
+[plot]
+make_plots = true
diff --git a/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv
new file mode 100644
index 000000000..d566cd6c3
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv
@@ -0,0 +1,6 @@
+eventID,Identifier,year,month,day,hour,minute,second,latitude,longitude,depth,Agency,magnitude,sigmaMagnitude,note
+EV0001,A,1900,1,1,0,0,0,45.1,10.1,5,AA,6.2,0.3,interface
+EV0002,A,1900,1,1,0,0,0,45.1,10.1,50,AA,6.2,0.5,below interface
+EV0003,A,1900,1,1,0,0,0,45.4,10.3,10,AA,6.2,0.1,crustal
+EV0004,A,1900,1,1,0,0,0,45.5,10.5,31,AA,6.2,0.22,sub_crustal
+EV0005,A,1900,1,1,0,0,0,45.6,10.6,20,AA,6.2,1.0,crustal
diff --git a/openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5 b/openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5
new file mode 100644
index 000000000..96e858071
Binary files /dev/null and b/openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5 differ
diff --git a/openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5 b/openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5
new file mode 100644
index 000000000..6db4173ad
Binary files /dev/null and b/openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5 differ
diff --git a/openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl b/openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl
new file mode 100644
index 000000000..6c0645b85
Binary files /dev/null and b/openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl differ
diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv
new file mode 100644
index 000000000..85f3ad958
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv
@@ -0,0 +1,6 @@
+eventID,Agency,year,month,day,hour,minute,second,timeError,longitude,latitude,SemiMajor90,SemiMinor90,ErrorStrike,depth,depthError,magnitude,sigmaMagnitude,magnitudeType
+EV0001,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,5.0,5.0,6.012359563267861,0.3,0.3
+EV0002,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,50.0,50.0,5.887265938779767,0.5,0.5
+EV0003,AA,1900,1,1,0,0,0.0,0.0,10.3,45.4,45.4,45.4,45.4,10.0,10.0,6.137453187755954,0.1,0.1
+EV0004,AA,1900,1,1,0,0,0.0,0.0,10.5,45.5,45.5,45.5,45.5,31.0,31.0,6.062397013063098,0.22,0.22
+EV0005,AA,1900,1,1,0,0,0.0,0.0,10.6,45.6,45.6,45.6,45.6,20.0,20.0,5.574531877559534,1.0,1.0
diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv
new file mode 100644
index 000000000..c361cf742
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv
@@ -0,0 +1,6 @@
+eventID,Agency,year,month,day,hour,minute,second,timeError,longitude,latitude,SemiMajor90,SemiMinor90,ErrorStrike,depth,depthError,magnitude,sigmaMagnitude,magnitudeType
+EV0001,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,5.0,5.0,6.305972746393061,0.3,0.3
+EV0002,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,50.0,50.0,6.376621243988434,0.5,0.5
+EV0003,AA,1900,1,1,0,0,0.0,0.0,10.3,45.4,45.4,45.4,45.4,10.0,10.0,6.235324248797687,0.1,0.1
+EV0004,AA,1900,1,1,0,0,0.0,0.0,10.5,45.5,45.5,45.5,45.5,31.0,31.0,6.277713347354911,0.22,0.22
+EV0005,AA,1900,1,1,0,0,0.0,0.0,10.6,45.6,45.6,45.6,45.6,20.0,20.0,6.553242487976869,1.0,1.0
diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv
new file mode 100644
index 000000000..d9e27f3f7
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv
@@ -0,0 +1,5 @@
+6.01,6.31
+5.89,6.38
+6.14,6.24
+6.06,6.28
+5.57,6.55
diff --git a/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv
new file mode 100644
index 000000000..0e7a810c0
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv
@@ -0,0 +1,7 @@
+,label,a-values,b-values
+0,intA-center,3.422,0.848
+1,intA-low,3.334,0.804
+2,intA-high,3.51,0.892
+3,crustal-center,3.35,0.878
+4,crustal-low,3.264,0.849
+5,crustal-high,3.437,0.907
diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
new file mode 100644
index 000000000..ce9a85305
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -0,0 +1,86 @@
+import os
+import toml
+import shutil
+import unittest
+import tempfile
+import numpy as np
+import pandas as pd
+
+from openquake.mbt.tools.mfd_sample.make_mfds import (_create_catalogue_versions,
+ make_many_mfds)
+
+BASE_PATH = os.path.dirname(__file__)
+
+class TestGenCats(unittest.TestCase):
+
+ def setUp(self):
+ self.catfi = os.path.join(BASE_PATH, 'data', 'catalogue.csv')
+        # Pick a temporary folder name (the code under test creates it)
+ self.tmpd = next(tempfile._get_candidate_names())
+
+ def test_generate_cats(self):
+ """
+ Test calculation of exceedance rate for magnitude equal to bin limit
+ """
+ _create_catalogue_versions(self.catfi, self.tmpd, 2, stype='random',
+ numstd=1, rseed=122)
+
+ mags_exp_fi = os.path.join(BASE_PATH, 'expected', 'v_mags.csv')
+ mags_out_fi = os.path.join(self.tmpd, 'v_mags.csv')
+ expected = pd.read_csv(mags_exp_fi)
+ output = pd.read_csv(mags_out_fi)
+ assert expected.equals(output)
+
+ shutil.rmtree(self.tmpd)
+
+class TestWorkflow(unittest.TestCase):
+
+ def setUp(self):
+ self.tmpd = tempfile.mkdtemp()
+
+ def test_full_wkflow(self):
+
+ # reassign main configs
+ config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
+ config = toml.load(config_fi)
+ cat_fi_name = config['main']['catalogue_filename']
+ cat_fi_name_new = os.path.join(BASE_PATH, cat_fi_name)
+ outdir_name = config['main']['output_directory']
+ outdir_name_new = os.path.join(BASE_PATH, outdir_name)
+ config['main']['catalogue_filename'] = cat_fi_name_new
+ #config['main']['output_directory'] = outdir_name_new
+ config['main']['output_directory'] = self.tmpd
+ dec_toml = config['decluster']['decluster_settings']
+ dec_toml_new = os.path.join(BASE_PATH, dec_toml)
+ comp_toml = config['completeness']['completeness_settings']
+ comp_toml_new = os.path.join(BASE_PATH, comp_toml)
+ config['completeness']['completeness_settings'] = comp_toml_new
+
+ # reassign decluster configs
+ config_dec = toml.load(dec_toml_new)
+ config_dec['main']['catalogue'] = cat_fi_name_new
+ dec_tr = config_dec['main']['tr_file']
+ dec_tr_new = os.path.join(BASE_PATH, dec_tr)
+ config_dec['main']['tr_file'] = dec_tr_new
+ config_dec_fi_new = os.path.join(BASE_PATH, 'test_dec_tmp.toml')
+ config['decluster']['decluster_settings'] = config_dec_fi_new
+
+ config_fi_new = os.path.join(BASE_PATH, 'test_tmp.toml')
+        with open(config_fi_new, "w") as file:
+            toml.dump(config, file)
+
+        with open(config_dec_fi_new, "w") as decfile:
+            toml.dump(config_dec, decfile)
+
+ make_many_mfds(config_fi_new, BASE_PATH)
+
+ expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv')
+ output_fi = os.path.join(self.tmpd, 'mfd-results.csv')
+ expected = pd.read_csv(expected_fi)
+ output = pd.read_csv(output_fi)
+ assert expected.equals(output)
+
+ shutil.rmtree(self.tmpd)
+ os.remove('tmp-config-dcl.toml')
diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py
new file mode 100644
index 000000000..1c0427910
--- /dev/null
+++ b/openquake/mbt/tools/mfd_sample/make_mfds.py
@@ -0,0 +1,277 @@
+#!/usr/bin/env python
+
+import os
+import re
+import sys
+import numpy as np
+import pandas as pd
+import pickle
+import glob
+import time
+import toml
+
+from openquake.baselib import sap
+from scipy.stats import truncnorm
+from copy import deepcopy
+
+from openquake.hmtk.parsers.catalogue import CsvCatalogueParser
+from openquake.mbi.ccl.decluster_multiple_TR import main as decluster_multiple_TR
+from openquake.cat.completeness.generate import get_completenesses
+from openquake.cat.completeness.analysis import (_completeness_analysis,
+ read_compl_params,
+ read_compl_data)
+from openquake.cat.completeness.mfd_eval_plots import make_all_plots
+
+def _get_truncated_normal(mean=0, sd=1, low=0, upp=10):
+ return truncnorm(
+ (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
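+# truncnorm takes its bounds in standard-deviation units relative to loc,
+# hence the (low - mean) / sd and (upp - mean) / sd conversion above.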
+
+def _write_cat_instance(catalogue, format, fileout, cnum):
+ if format == 'hmtk':
+ catalogue.write_catalogue(fileout.format(cnum))
+ elif format == 'pickle':
+ with open(fileout.format(cnum), 'wb') as f:
+ pickle.dump(catalogue, f)
+
+def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random',
+ numstd=1, rseed=122):
+ """
+ catfi: catalogue filename (pkl or hmtk/csv)
+ outdir: where to put the catalogues
+ numcats: number of catalogues to create, if stype is random
+ stype:
+ random - samples from truncnorm
+        even - same sample for every magnitude; -1 : 1 by 0.1
+    numstd: number of standard deviations used as truncation bounds
+    rseed: random seed to control the cat generation
+ """
+    # warn if the output folder already exists: files will be overwritten
+ if os.path.exists(outdir):
+ tmps = f'\nWarning: {outdir} already exists! '
+ tmps += '\n Overwriting files.'
+ print(tmps)
+ # sys.exit(1)
+ else:
+ os.makedirs(outdir)
+
+ csvout = os.path.join(outdir, 'v{}_catalogue.pkl')
+ fileout = os.path.join(outdir, 'v_mags.csv')
+ factors = np.arange(-1,1,0.1)
+
+ # read catalogue
+ if 'pkl' in catfi:
+ format = 'pickle'
+ catalogue = pd.read_pickle(catfi)
+
+ elif ('csv' in catfi) or ('hmtk' in catfi):
+ parser = CsvCatalogueParser(catfi) # From .csv to hmtk
+ catalogue = parser.read_file()
+ format = 'hmtk'
+
+ else:
+ print(sys.stderr, "Use a supported catalogue format.")
+ sys.exit(1)
+
+ data = catalogue.data
+
+ if numcats and stype == 'even':
+        print("Cannot specify number of catalogues for even sampling.",
+              file=sys.stderr)
+ sys.exit(1)
+
+ if stype == 'random':
+
+ mags = []
+
+ time_strt = time.time()
+ for ii in np.arange(0,len(data['magnitude'])):
+ m=data['magnitude'][ii]
+ sd=data['sigmaMagnitude'][ii]
+ allm = _get_truncated_normal(mean=m, sd=sd,
+ low=m-numstd*sd, upp=m+numstd*sd)
+ mags_perm = allm.rvs(size=numcats, random_state=rseed)
+ mags.append(mags_perm)
+
+ marr = np.array(mags)
+ np.savetxt(fileout, marr, delimiter=',', fmt='%.2f')
+
+ for ii, ms in enumerate(marr.T):
+ time_strt = time.time()
+ catalogue = deepcopy(catalogue)
+ catalogue.data['magnitude'] = np.array(ms)
+ _write_cat_instance(catalogue, format, csvout, ii)
+ time_end = time.time()
+
+
+ elif stype == 'even':
+ full_mags = {}
+ for jj, f in enumerate(factors):
+ mags = [f * ms + m for m, ms in zip(data['magnitude'], data['sigmaMagnitude'])]
+ catalogue = deepcopy(catalogue)
+ catalogue.data['magnitude'] = np.array(mags)
+            _write_cat_instance(catalogue, format, csvout, jj)
+ full_mags[jj] = mags
+ pd.DataFrame(full_mags).to_csv(fileout, index=None)
+
+ else:
+ print(sys.stderr, "Use a supported sampling type.")
+ sys.exit(1)
+
+
+
+def _decl_all_cats(outdir, dcl_toml_tmp, decdir):
+    """
+    Decluster every sampled catalogue in `outdir` using the settings in the
+    declustering .toml template `dcl_toml_tmp`, writing outputs to `decdir`.
+    """
+ catvs = glob.glob(os.path.join(outdir, '*pkl'))
+
+ config = toml.load(dcl_toml_tmp)
+ config['main']['save_aftershocks'] = False
+ for cat in catvs:
+ config['main']['catalogue'] = cat
+ config['main']['output'] = decdir
+ tmpfi = 'tmp-config-dcl.toml'
+ with open(tmpfi, 'w') as f:
+ f.write(toml.dumps(config))
+
+ decluster_multiple_TR(tmpfi)
+
+ labels = []
+ for key in config:
+ if re.search('^case', key):
+ labels.extend(config[key]['regions'])
+
+
+
+def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi):
+ """
+ """
+ config = toml.load(compl_toml)
+
+ cref = config['completeness'].get('completeness_ref', None)
+
+ mmin_compl = config['completeness']['min_mag_compl']
+
+    if cref is None:
+ mags = np.arange(4, 8.0, 0.1)
+ years = np.arange(1900, 2020, 5.0)
+ else:
+ mrange = config['completeness'].get('deviation', 1.0)
+ mags_cref = [c[1] for c in cref]
+ mags_min = min(mags_cref) - mrange
+ mags_max = max(mags_cref) + mrange
+ mags_all = np.arange(mags_min, mags_max, 0.2)
+        mags = sorted({round(m, 2) for m in mags_all if m >= mmin_compl})
+ print(mags)
+
+ years = [c[0] for c in cref]
+ years.sort()
+
+ config['completeness']['mags'] = mags
+ config['completeness']['years'] = years
+
+ with open(tmpfi, 'w') as f:
+ f.write(toml.dumps(config))
+
+ get_completenesses(tmpfi, compdir)
+
+
+def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs):
+ """
+ """
+ # load configs
+ config = toml.load(compl_toml)
+
+ ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config)
+ compl_tables, mags_chk, years_chk = read_compl_data(compdir)
+
+ # Fixing sorting of years
+    if np.all(np.diff(yrs) >= 0):
+ yrs = np.flipud(yrs)
+
+ for lab in labels:
+ dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}.csv'))
+ fout_lab = os.path.join(fout, lab)
+ fout_figs_lab = os.path.join(fout_figs, lab, 'mfds')
+ for ii, cat in enumerate(dec_catvs):
+ try:
+ res = _completeness_analysis(cat, yrs, ms, bw, r_m,
+ r_up_m, [bmin, bmax], crit,
+ compl_tables, f'{ii}',
+ folder_out_figs=fout_figs_lab,
+ folder_out=fout_lab,
+ rewrite=False)
+            except Exception:
+                print(f'Completeness analysis failed for catalogue {ii}')
+
+
+def make_many_mfds(configfile, basedir=None):
+ """
+ """
+
+ config = toml.load(configfile)
+
+ # read basic inputs
+ catfi = config['main']['catalogue_filename']
+ outdir = config['main']['output_directory']
+ labs = config['mfds'].get('labels', None)
+
+
+ # make subdirs based on outdir name
+ catdir = os.path.join(outdir, 'catalogues')
+ decdir = os.path.join(outdir, 'declustered')
+ compdir = os.path.join(outdir, 'completeness')
+ resdir = os.path.join(outdir, 'results')
+ figdir = os.path.join(outdir, 'figures')
+ tmpconf = os.path.join(outdir, 'tmp-config-compl.toml')
+
+
+ # if create_cats, make the sampled catalogues
+ create_cats = config['catalogues'].get('create_catalogues', True)
+ if create_cats:
+ ncats = int(config['catalogues'].get('number', 1000))
+ stype = config['catalogues'].get('sampling_type', 'random')
+ nstd = config['catalogues'].get('num_std_devs', 1)
+ rseed = config['catalogues'].get('random_seed', 122)
+ _create_catalogue_versions(catfi, catdir, numcats=ncats, stype=stype,
+ numstd=nstd, rseed=rseed)
+
+ # perform all the declustering possibilities - links TRTs following configs
+ decluster = config['decluster'].get('decluster_catalogues', True)
+ if decluster:
+ dcl_toml_tmpl = config['decluster']['decluster_settings']
+ _decl_all_cats(catdir, dcl_toml_tmpl, decdir)
+
+
+ # generate the completeness tables
+ generate = config['completeness'].get('generate_completeness', True)
+ if generate:
+ compl_toml = config['completeness']['completeness_settings']
+ _gen_comple(compl_toml, decdir, compdir, tmpconf)
+
+ # create the mfds for the given TRT labels
+ mfds = config['mfds'].get('create_mfds', True)
+ if mfds:
+ if not labs:
+ print('Must specify the TRTs')
+ sys.exit()
+
+ _compl_analysis(decdir, compdir, tmpconf, labs, resdir, figdir)
+
+ plots = config['plot'].get('make_plots', True)
+ if plots:
+ if not labs:
+ print('Must specify the TRTs')
+ sys.exit()
+ labels, agrs, bgrs = make_all_plots(resdir, compdir, figdir, labs)
+ fin = pd.DataFrame({'label': labels, 'a-values': agrs, 'b-values': bgrs})
+ if basedir:
+ fin.to_csv(os.path.join(basedir, outdir, 'mfd-results.csv'))
+ else:
+ fin.to_csv(os.path.join(outdir, 'mfd-results.csv'))
+
+make_many_mfds.configfile = 'path to configuration file'
+
+if __name__ == '__main__':
+ sap.run(make_many_mfds)
diff --git a/openquake/mbt/tools/model_building/dclustering.py b/openquake/mbt/tools/model_building/dclustering.py
index 923d34019..e530c2cd4 100755
--- a/openquake/mbt/tools/model_building/dclustering.py
+++ b/openquake/mbt/tools/model_building/dclustering.py
@@ -62,9 +62,9 @@ def dec(declustering_params, declustering_meth, cat):
def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params,
- output_path, labels=None, tr_fname=None, subcatalogues=False,
- fmat='csv', olab='', save_af=False, out_fname_ext='',
- fix_defaults=False):
+ output_path, labels=None, tr_fname=None,
+ subcatalogues=False, fmat='csv', olab='', save_af=False,
+ out_fname_ext='', fix_defaults=False):
"""
:param str catalogue_hmtk_fname:
Full path to the file containing the initial catalogue
@@ -124,7 +124,7 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params,
cat = _add_defaults(cat)
cato = copy.deepcopy(cat)
- # Select earthquakes belonging to a given TR. When necessary combining
+ # Select earthquakes belonging to a given TR. When necessary combining
# multiple TRs, use label ,AND...
idx = numpy.full(cat.data['magnitude'].shape, True, dtype=bool)
sumchk = 0
@@ -216,40 +216,44 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params,
tmpsum1 = int(sum(idx_tmp))
tmpsum2 = int(sum(kkk))
logging.info(tmps.format(lab, tmpsum1, tmpsum2, pct))
- print(tmps.format(lab, tmpsum1, tmpsum2, pct))
- print(tmpr.format(min(ooo.data['magnitude']),
+ try:
+ print(tmps.format(lab, tmpsum1, tmpsum2, pct))
+ print(tmpr.format(min(ooo.data['magnitude']),
max(ooo.data['magnitude'])))
- #
- # Output filename
- ext = '_dec_{:s}_{:s}.{:s}'.format(olab, lab, fmat)
- tcat_fname = Path(os.path.basename(catalogue_hmtk_fname)).stem
- tcat_fname = tcat_fname + ext
- tmps = os.path.join(output_path, tcat_fname)
- tcat_fname = os.path.abspath(tmps)
- if save_af:
- ext = '_dec_af_{:s}_{:s}.{:s}'.format(olab, lab, fmat)
- tcataf_fname = Path(
- os.path.basename(catalogue_hmtk_fname)).stem + ext
- tmps = os.path.join(output_path, tcataf_fname)
- tcataf_fname = os.path.abspath(tmps)
- #
- # Dumping data into the pickle file
- if ooo is not None:
- if fmat == 'csv':
- ooo.write_catalogue(tcat_fname)
- if save_af:
- aaa.write_catalogue(tcataf_fname)
- elif fmat == 'pkl':
- fou = open(tcat_fname, 'wb')
- pickle.dump(ooo, fou)
- fou.close()
- if save_af:
- fou = open(tcataf_fname, 'wb')
- pickle.dump(aaa, fou)
+ #
+ # Output filename
+ ext = '_dec_{:s}_{:s}.{:s}'.format(olab, lab, fmat)
+ tcat_fname = Path(os.path.basename(catalogue_hmtk_fname)).stem
+ tcat_fname = tcat_fname + ext
+ tmps = os.path.join(output_path, tcat_fname)
+ tcat_fname = os.path.abspath(tmps)
+ if save_af:
+ ext = '_dec_af_{:s}_{:s}.{:s}'.format(olab, lab, fmat)
+ tcataf_fname = Path(
+ os.path.basename(catalogue_hmtk_fname)).stem + ext
+ tmps = os.path.join(output_path, tcataf_fname)
+ tcataf_fname = os.path.abspath(tmps)
+ #
+ # Dumping data into the pickle file
+ if ooo is not None:
+ if fmat == 'csv':
+ ooo.write_catalogue(tcat_fname)
+ if save_af:
+ aaa.write_catalogue(tcataf_fname)
+ elif fmat == 'pkl':
+ fou = open(tcat_fname, 'wb')
+ pickle.dump(ooo, fou)
fou.close()
- else:
- tstr = 'Catalogue for region {:s} is empty'.format(lab)
- logging.warning(tstr)
+ if save_af:
+ fou = open(tcataf_fname, 'wb')
+ pickle.dump(aaa, fou)
+ fou.close()
+ else:
+ tstr = 'Catalogue for region {:s} is empty'.format(lab)
+ logging.warning(tstr)
+ except:
+ continue
+
f.close()
outl = [out_fname]
diff --git a/openquake/mbt/tools/tr/set_subduction_earthquakes.py b/openquake/mbt/tools/tr/set_subduction_earthquakes.py
index 241534ec8..1426524f5 100644
--- a/openquake/mbt/tools/tr/set_subduction_earthquakes.py
+++ b/openquake/mbt/tools/tr/set_subduction_earthquakes.py
@@ -38,6 +38,7 @@
get_distances_from_surface)
from openquake.sub.utils import (_read_edges,
build_complex_surface_from_edges,
+ build_kite_surface_from_profiles,
plot_complex_surface)
from openquake.hmtk.seismicity.selector import CatalogueSelector
@@ -95,7 +96,7 @@ def __init__(self, label, treg_filename, distance_folder, edges_folder,
self.low_mag = float(low_mag)
self.upp_mag = float(upp_mag)
- def classify(self, compute_distances, remove_from):
+ def classify(self, compute_distances, remove_from, surftype='ComplexFault'):
"""
:param bool compute_distances:
A boolean indicating if distances between earthquakes and the
@@ -140,21 +141,32 @@ def classify(self, compute_distances, remove_from):
# Build the complex fault surface
tedges = _read_edges(edges_folder)
print(edges_folder)
- surface = build_complex_surface_from_edges(edges_folder)
- mesh = surface.mesh
+ if surftype == 'ComplexFault':
+ surface = build_complex_surface_from_edges(edges_folder)
+ # Create polygon encompassing the mesh
+ mesh = surface.mesh
+ plo = list(mesh.lons[0, :])
+ pla = list(mesh.lats[0, :])
+ #
+ plo += list(mesh.lons[:, -1])
+ pla += list(mesh.lats[:, -1])
+ #
+ plo += list(mesh.lons[-1, ::-1])
+ pla += list(mesh.lats[-1, ::-1])
+ #
+ plo += list(mesh.lons[::-1, 0])
+ pla += list(mesh.lats[::-1, 0])
+
+ elif surftype == 'KiteFault':
+ surface = build_kite_surface_from_profiles(edges_folder)
+ # Create polygon encompassing the mesh
+ mesh = surface.mesh
+ plo = surface.surface_projection[0]
+ pla = surface.surface_projection[1]
+ else:
+ msg = f'surface type {surftype} not supported'
+ raise ValueError(msg)
- # Create polygon encompassing the mesh
- plo = list(mesh.lons[0, :])
- pla = list(mesh.lats[0, :])
- #
- plo += list(mesh.lons[:, -1])
- pla += list(mesh.lats[:, -1])
- #
- plo += list(mesh.lons[-1, ::-1])
- pla += list(mesh.lats[-1, ::-1])
- #
- plo += list(mesh.lons[::-1, 0])
- pla += list(mesh.lats[::-1, 0])
# Set variables used in griddata
data = np.array([mesh.lons.flatten().T, mesh.lats.flatten().T]).T
@@ -167,10 +179,10 @@ def classify(self, compute_distances, remove_from):
grp.create_dataset('mesh', data=ddd)
# Set the bounding box of the subduction surface
- min_lo_sub = np.amin(mesh.lons)
- min_la_sub = np.amin(mesh.lats)
- max_lo_sub = np.amax(mesh.lons)
- max_la_sub = np.amax(mesh.lats)
+ min_lo_sub = np.nanmin(mesh.lons)
+ min_la_sub = np.nanmin(mesh.lats)
+ max_lo_sub = np.nanmax(mesh.lons)
+ max_la_sub = np.nanmax(mesh.lats)
# Select the earthquakes within the bounding box
idxs = sorted(list(sidx.intersection((min_lo_sub-DELTA,
@@ -263,8 +275,12 @@ def classify(self, compute_distances, remove_from):
# interpolation
# sub_depths = griddata(data, values, (points[:, 0], points[:, 1]),
# method='cubic')
- rbfi = RBFInterpolator(data[:, 0:2], values, kernel='multiquadric',
- epsilon=1)
+ val_red = values[~np.isnan(values)]
+ dat_red = data[~np.isnan(data)].reshape(-1, 2)
+
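+        # NaN mesh nodes (undefined parts of the surface) are dropped before
+        # fitting; neighbors=100 makes the interpolation local, using only
+        # the 100 nearest nodes instead of a dense global solve.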
+ rbfi = RBFInterpolator(dat_red[:, 0:2], val_red, kernel='multiquadric',
+ epsilon=1, neighbors=100)
sub_depths = rbfi(points[:, 0:2])
# Save the distances to a file
diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py
index a2fc691a3..fe6aeaa5a 100644
--- a/openquake/sub/create_2pt5_model.py
+++ b/openquake/sub/create_2pt5_model.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python
import os
import re
+import sys
import glob
import numpy
@@ -54,7 +55,8 @@ def get_interpolated_profiles(sps, lengths, number_of_samples):
for key in sorted(sps.keys()):
# calculate the sampling distance
- samp = lengths[key] / number_of_samples
+ # multiplier is making the last point be closer to the original end pt
+ samp = lengths[key] / number_of_samples * 0.99
# set data for the profile
dat = sps[key]
@@ -294,7 +296,7 @@ def write_edges_csv(sps, foldername):
#
# run for all the edges i.e. number of
max_num = len(sps[list(sps.keys())[0]])
- for idx in range(0, max_num - 1):
+    for idx in range(0, max_num):
dat = []
for key in sorted(sps):
dat.append(sps[key][idx, :])
diff --git a/openquake/sub/cross_sections.py b/openquake/sub/cross_sections.py
index 5728c2d19..a98d6fde7 100644
--- a/openquake/sub/cross_sections.py
+++ b/openquake/sub/cross_sections.py
@@ -48,6 +48,7 @@
from scipy.interpolate import LinearNDInterpolator
from openquake.hmtk.seismicity.selector import CatalogueSelector
+from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser
from openquake.hmtk.parsers.catalogue.gcmt_ndk_parser import ParseNDKtoGCMT
@@ -178,6 +179,9 @@ def __init__(self, cross_section):
self.topo = None
self.litho = None
self.volc = None
+ self.cs = None
+ self.c_eqks = None
+ self.count = [0]*4
def set_trench_axis(self, filename):
"""
@@ -215,6 +219,58 @@ def set_catalogue(self, catalogue, bffer=75.):
newcat = selector.select_catalogue(boo)
self.ecat = newcat
+
+ def set_catalogue_classified(self, classes, classlist, bffer=75.):
+ """
+ """
+ print('setting catalogue')
+
+ types = classlist.split(',')
+ datal = []
+ for file_n in types:
+ filen = os.path.join(classes.format(file_n))
+ print(filen)
+ parser = CsvCatalogueParser(filen)
+ catalogueA = parser.read_file()
+ sel1 = CatalogueSelector(catalogueA, create_copy=True)
+ catalogue = sel1.within_magnitude_range(lower_mag=None,upper_mag=None)
+ print(len(catalogue.data['depth']))
+            _, _, _, _, qual = self.csec.get_mm()
+            if qual == 1:
+                idxs = self.csec.get_eqks_within_buffer_idl(catalogue, bffer)
+            else:
+                idxs = self.csec.get_eqks_within_buffer(catalogue, bffer)
+ boo = numpy.zeros_like(catalogue.data['magnitude'], dtype=int)
+ boo[idxs] = 1
+            selector = CatalogueSelector(catalogue, create_copy=True)
+ newcat = selector.select_catalogue(boo)
+ lon = newcat.data['longitude']
+            lon = [x + 360 if x < 0 else x for x in lon]
+ lat = newcat.data['latitude']
+ depth = newcat.data['depth']
+ mag = newcat.data['magnitude']
+ cl_len = len(lat)
+            if 'crustal' in filen.lower():
+                cla = [1]*cl_len
+                self.count[0] = cl_len
+            if 'int' in filen.lower():
+                cla = [2]*cl_len
+                self.count[1] = cl_len
+            if 'slab' in filen.lower():
+                cla = [3]*cl_len
+                self.count[2] = cl_len
+            if 'unc' in filen.lower():
+                cla = [4]*cl_len
+                self.count[3] = cl_len
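+            # class codes stored in column 3: 1=crustal, 2=interface,
+            # 3=slab, 4=unclassified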
+ for g in range(len(lat)):
+ datal.append([lon[g], lat[g], depth[g], cla[g], mag[g]])
+
+ dataa = numpy.array(datal)
+        if len(datal):
+ self.c_eqks = numpy.squeeze(dataa[:, :])
+
+
def set_slab1pt0(self, filename, bffer=2.0):
"""
:parameter filename:
@@ -333,19 +389,19 @@ def set_gcmt(self, filename, gcmt_mag=0.0, bffer=75.):
minlo, maxlo, minla, maxla, qual = self.csec.get_mm()
if qual == 0:
- idxs = self.csec.get_grd_nodes_within_buffer(loc,
+ idxs, _ = self.csec.get_grd_nodes_within_buffer(loc,
lac,
bffer,
minlo, maxlo,
minla, maxla)
if qual == 1:
- idxs = self.csec.get_grd_nodes_within_buffer_idl(loc,
+ idxs, _ = self.csec.get_grd_nodes_within_buffer_idl(loc,
lac,
bffer,
minlo, maxlo,
minla, maxla)
if idxs is not None:
- cmt_cat.select_catalogue_events(idxs[0])
+ cmt_cat.select_catalogue_events(idxs)
self.gcmt = cmt_cat
def set_topo(self, filename, bffer=0.25):
diff --git a/openquake/sub/edges_set.py b/openquake/sub/edges_set.py
index ee06e715d..1ebfa336e 100644
--- a/openquake/sub/edges_set.py
+++ b/openquake/sub/edges_set.py
@@ -6,6 +6,7 @@
from openquake.hazardlib.geo import Line, Point
from openquake.hazardlib.source import ComplexFaultSource
+from openquake.hazardlib.source import KiteFaultSource
from openquake.hazardlib.tom import PoissonTOM
from openquake.hazardlib.const import TRT
from openquake.hazardlib.mfd import TruncatedGRMFD
@@ -73,3 +74,4 @@ def get_complex_fault(self, params={}, section_length=None):
p['temporal_occurrence_model'],
self.edges,
p['rake'])
+
diff --git a/openquake/sub/plotting/plot_cross_section.py b/openquake/sub/plotting/plot_cross_section.py
index 634663ee1..1f73bf416 100755
--- a/openquake/sub/plotting/plot_cross_section.py
+++ b/openquake/sub/plotting/plot_cross_section.py
@@ -1,7 +1,9 @@
#!/usr/bin/env python
+import os
import re
import sys
+import h5py
import pickle
import numpy
import matplotlib
@@ -12,7 +14,7 @@
from obspy.imaging.beachball import beach
from matplotlib.backend_bases import KeyEvent
-from matplotlib.patches import (Circle, Rectangle)
+from matplotlib.patches import (Circle, Rectangle, Ellipse)
from matplotlib.collections import PatchCollection
from openquake.sub.cross_sections import (CrossSection, CrossSectionData)
@@ -36,6 +38,11 @@
'R-SS': 'goldenrod',
'SS-R': 'yellow'}
+CLASSIFICATION = {'interface': 'blue',
+ 'crustal': 'purple',
+ 'slab': 'aquamarine',
+ 'unclassified': 'yellow'}
+
def onclick(event):
print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
@@ -52,11 +59,15 @@ def _plot_h_eqk_histogram(axes, csda, dep_max=[], dis_max=[]):
:param dep_max:
:param dis_max:
"""
- if csda.ecat is None:
+ if (csda.ecat is None) and (csda.c_eqks is None):
return
plt.axes(axes)
- newcat = csda.ecat
+
+ if csda.ecat:
+ newcat = csda.ecat
+ else:
+ newcat = csda.c_eqks
olo = csda.csec.olo
ola = csda.csec.ola
@@ -124,7 +135,7 @@ def _plot_v_eqk_histogram(axes, csda, dep_max=[], dis_max=[]):
:param dis_max:
"""
- if csda.ecat is None:
+ if (csda.ecat is None) and (csda.c_eqks is None):
return
plt.axes(axes)
@@ -394,6 +405,37 @@ def _plot_eqks(axes, csda):
axes.add_collection(p)
+def _plot_c_eqks(axes, csda):
+    """
+    Plot the classified earthquakes on the cross-section panel.
+
+    :parameter axes:
+    :parameter csda:
+    """
+    classcat = csda.c_eqks
+
+    olo = csda.csec.olo
+    ola = csda.csec.ola
+    dsts = geodetic_distance(olo, ola,
+                             classcat[:, 0],
+                             classcat[:, 1])
+    depths = classcat[:, 2]
+    classes = classcat[:, 3]
+    mags = classcat[:, 4]
+    patches = []
+    for dst, dep, mag in zip(dsts, depths, mags):
+        circle = Circle((dst, dep), (mag*0.5)**1.5, ec='white')
+        patches.append(circle)
+    # colour the circles by class code (1-4) through the current colormap
+    p = PatchCollection(patches, zorder=0, edgecolors='white')
+    p.set_alpha(0.6)
+    p.set_array(numpy.array(classes))
+    p.set_clim([1, 4])
+    axes.add_collection(p)
+
+
def _print_legend(axes, depp, lnght):
x = int(lnght / 2.)
xstep = 40
@@ -418,7 +460,20 @@ def _print_legend(axes, depp, lnght):
axes.add_patch(box)
-def _print_info(axes, csec, depp):
+def _print_legend2(axes, depp, lnght):
+    """
+    Print the colour legend for the classified seismicity.
+    """
+    x = 7
+    ystep = 11
+    y = depp - 49
+    for key in sorted(CLASSIFICATION):
+        box = matplotlib.patches.Ellipse(xy=(x, y), width=8, height=8,
+                                         color=CLASSIFICATION[key],
+                                         clip_on=False)
+        box.set_alpha(0.5)
+        axes.add_patch(box)
+        y += ystep
+
+
+def _print_info(axes, csec, depp, count):
"""
"""
plt.axes(axes)
@@ -430,12 +485,33 @@ def _print_info(axes, csec, depp):
axes.annotate(note, xy=(0.0, depp+30), xycoords='data',
annotation_clip=False, fontsize=8)
- note = 'Cross-Section lenght: %.1f [km]' % (csec.length[0])
+ note = 'Cross-Section length: %.1f [km]' % (csec.length[0])
plt.gca().annotate(note, xy=(0.0, depp+40), xycoords='data',
annotation_clip=False, fontsize=8)
+ ystep = 11
+ xloc = 17
+ note = 'Classification:'
+ plt.gca().annotate(note, xy=(4, depp-5*ystep), xycoords='data',
+ annotation_clip=False, fontsize=8)
+ note = 'Crustal: %d' % (count[0])
+ plt.gca().annotate(note, xy=(xloc, depp-4*ystep), xycoords='data',
+ annotation_clip=False, fontsize=8)
+
+ note = 'Interface: %d' % (count[1])
+ plt.gca().annotate(note, xy=(xloc, depp-3*ystep), xycoords='data',
+ annotation_clip=False, fontsize=8)
+
+ note = 'Slab: %d' % (count[2])
+ plt.gca().annotate(note, xy=(xloc, depp-2*ystep), xycoords='data',
+ annotation_clip=False, fontsize=8)
+
+ note = 'Unclassified: %d' % (count[3])
+ plt.gca().annotate(note, xy=(xloc, depp-1*ystep), xycoords='data',
+ annotation_clip=False, fontsize=8)
+
-def plot(csda, depp, lnght):
+def plot(csda, depp, lnght, plottype):
"""
"""
# Computing figure width
@@ -458,11 +534,18 @@ def plot(csda, depp, lnght):
plt.axes(ax3)
ax3.xaxis.tick_top()
- _print_info(ax3, csda.csec, depp)
+ _print_info(ax3, csda.csec, depp, csda.count)
_print_legend(ax3, depp, lnght)
+ _print_legend2(ax3, depp, lnght)
# Plotting
- _plot_eqks(ax3, csda)
+ if 'classification' in plottype:
+ _plot_c_eqks(plt.subplot(gs[3]), csda)
+ else:
+ _plot_eqks(plt.subplot(gs[3]), csda)
+ _plot_h_eqk_histogram(plt.subplot(gs[1]), csda, depp, lnght)
+ _plot_v_eqk_histogram(plt.subplot(gs[2]), csda, depp, lnght)
_plot_moho(ax3, csda)
_plot_litho(ax3, csda)
_plot_topo(ax3, csda)
@@ -472,8 +555,6 @@ def plot(csda, depp, lnght):
_plot_focal_mech(ax3, csda)
_plot_slab1pt0(ax3, csda)
_plot_np_intersection(ax3, csda)
- _plot_h_eqk_histogram(ax1, csda, depp, lnght)
- _plot_v_eqk_histogram(ax2, csda, depp, lnght)
# Main panel
plt.axes(ax3)
@@ -567,9 +648,22 @@ def plt_cs(olo, ola, depp, lnght, strike, ids, ini_filename):
try:
fname_trench = config['data']['trench_axis_filename']
csda.set_trench_axis(fname_trench)
- except:
+ except KeyError:
fname_trench = None
- fname_eqk_cat = config['data']['catalogue_pickle_filename']
+
+ plottype = ''
+ if config.has_option('general', 'type'):
+     plottype = config['general']['type']
+ if 'classification' in plottype:
+     fname_class = config['data']['class_base']
+     fname_classlist = config['data']['class_list']
+     csda.set_catalogue_classified(fname_class, fname_classlist)
+ else:
+     fname_eqk_cat = config['data']['catalogue_pickle_filename']
+     cat = pickle.load(open(fname_eqk_cat, 'rb'))
+     csda.set_catalogue(cat, 75.)
+
fname_slab = config['data']['slab1pt0_filename']
fname_crust = config['data']['crust1pt0_filename']
fname_gcmt = config['data']['gcmt_filename']
@@ -594,7 +688,7 @@ def plt_cs(olo, ola, depp, lnght, strike, ids, ini_filename):
csda.set_litho_moho_depth(fname_litho)
csda.set_volcano(fname_volc)
- fig = plot(csda, depp, lnght)
+ fig = plot(csda, depp, lnght, plottype)
return fig
diff --git a/openquake/sub/profiles.py b/openquake/sub/profiles.py
index 66523d259..f4a4925ca 100644
--- a/openquake/sub/profiles.py
+++ b/openquake/sub/profiles.py
@@ -9,6 +9,15 @@
from scipy import interpolate
from openquake.hazardlib.geo import Line, Point
+from openquake.sub.edges_set import DEFAULTS
+from openquake.hazardlib.source import KiteFaultSource
def _from_lines_to_array(lines):
@@ -101,3 +110,27 @@ def smooth(self, method='linear'):
plt.show()
return grd
+
+
+def get_kite_fault(profiles, params={}, section_length=None):
+    """
+    :param profiles:
+        A :class:`openquake.sub.profiles.ProfileSet` instance
+    :param params:
+        A dictionary of source parameters overriding `DEFAULTS`
+    :param section_length:
+        Currently unused
+    """
+ p = DEFAULTS
+
+ # update the default parameters
+ for key in params:
+ p[key] = params[key]
+
+ # create the kite fault source instance
+ return KiteFaultSource(p['source_id'],
+ p['name'],
+ p['tectonic_region_type'],
+ p['mfd'],
+ p['rupture_mesh_spacing'],
+ p['magnitude_scaling_relationship'],
+ p['rupture_aspect_ratio'],
+ p['temporal_occurrence_model'],
+ profiles.profiles,
+ p['rake'])
diff --git a/openquake/sub/slab/rupture.py b/openquake/sub/slab/rupture.py
index 67aa1234f..34e561a36 100755
--- a/openquake/sub/slab/rupture.py
+++ b/openquake/sub/slab/rupture.py
@@ -16,10 +16,10 @@
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
-# from mayavi import mlab
+#from mayavi import mlab
from pyproj import Proj
-# from openquake.sub.plotting.tools import plot_mesh
-# from openquake.sub.plotting.tools import plot_mesh_mayavi
+##from openquake.sub.plotting.tools import plot_mesh
+#from openquake.sub.plotting.tools import plot_mesh_mayavi
from openquake.sub.misc.edge import create_from_profiles
from openquake.sub.quad.msh import create_lower_surface_mesh
@@ -41,10 +41,12 @@
from openquake.man.checks.catalogue import load_catalogue
from openquake.wkf.utils import create_folder
+#PLOTTING = True
PLOTTING = False
-def get_catalogue(cat_pickle_fname, treg_filename=None, label=''):
+def get_catalogue(cat_pickle_fname, treg_filename=None, label='',
+ sort_cat=False):
"""
:param cat_pickle_fname:
:param treg_filename:
@@ -58,9 +60,9 @@ def get_catalogue(cat_pickle_fname, treg_filename=None, label=''):
f.close()
#
# loading the catalogue
- # catalogue = pickle.load(open(cat_pickle_fname, 'rb'))
catalogue = load_catalogue(cat_pickle_fname)
- catalogue.sort_catalogue_chronologically()
+ if sort_cat:
+ catalogue.sort_catalogue_chronologically()
#
# if a label and a TR are provided we filter the catalogue
if treg_filename is not None:
@@ -537,6 +539,10 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None,
# Catalogue
cat_pickle_fname = config.get('main', 'catalogue_pickle_fname')
cat_pickle_fname = os.path.abspath(os.path.join(ref_fdr, cat_pickle_fname))
+    try:
+        # config.get returns a string and bool('False') evaluates to
+        # True, so parse the flag with getboolean instead
+        sort_cat = config.getboolean('main', 'sort_catalogue')
+    except Exception:
+        sort_cat = False
# Output
hdf5_filename = config.get('main', 'out_hdf5_fname')
@@ -586,12 +592,14 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None,
logging.info('Creating ruptures on virtual faults')
print('Creating ruptures on virtual faults')
ohs = create_inslab_meshes(msh, dips, slab_thickness, sampling)
- if only_plt:
- pass
+    # mayavi-based plotting below is disabled for now
+    if False:
# TODO consider replacing with pyvista
- """
+
azim = 10.
elev = 20.
dist = 20.
@@ -625,7 +633,7 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None,
mlab.show()
exit(0)
- """
+
if PLOTTING:
vsc = 0.01
@@ -671,6 +679,9 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None,
vspa)
# mlo, mla, mde = msh3d.select_nodes_within_two_meshesa(omsh, olmsh)
mlo, mla, mde = msh3d.get_coordinates_vectors()
+    if False:
+        # debug helper: dump the mesh coordinates to a csv file
+        df = pd.DataFrame({'mlo': mlo, 'mla': mla, 'mde': mde})
+        df.to_csv('mesh_coords.csv')
# save data on hdf5 file
if os.path.exists(hdf5_filename):
@@ -687,7 +698,8 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None,
fh5.close()
# Get catalogue
- catalogue = get_catalogue(cat_pickle_fname, treg_filename, label)
+ catalogue = get_catalogue(cat_pickle_fname, treg_filename, label,
+ sort_cat)
# smoothing
values, smooth = smoothing(mlo, mla, mde, catalogue, hspa, vspa,
diff --git a/openquake/sub/slab/rupture_utils.py b/openquake/sub/slab/rupture_utils.py
index 69da3a912..67af58c6b 100644
--- a/openquake/sub/slab/rupture_utils.py
+++ b/openquake/sub/slab/rupture_utils.py
@@ -196,5 +196,7 @@ def get_discrete_dimensions(area, sampling, aspr):
wdt = None
lng = None
elif area_error > 0.25 and lng > 1e-10 and wdt > 1e-10:
- raise ValueError('Area discrepancy: ', area, lng*wdt, lng, wdt, aspr)
+        print('Area discrepancy: ', area, lng*wdt, lng, wdt, aspr)
return lng, wdt
diff --git a/openquake/sub/tests/create_2pt5_model_test.py b/openquake/sub/tests/create_2pt5_model_test.py
index b3f712b66..47f4f6484 100644
--- a/openquake/sub/tests/create_2pt5_model_test.py
+++ b/openquake/sub/tests/create_2pt5_model_test.py
@@ -16,6 +16,7 @@
CS_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs')
+CS_DATA_PATH2 = os.path.join(os.path.dirname(__file__), 'data/cs2')
CAM_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs_cam')
PAISL_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs_paisl')
@@ -54,7 +55,7 @@ def test_write_profiles_edges(self):
tmp = []
for fname in glob.glob(os.path.join(self.test_dir, 'edge*')):
tmp.append(fname)
- self.assertEqual(len(tmp), 7)
+ self.assertEqual(len(tmp), 8)
class ReadProfilesTest(unittest.TestCase):
@@ -124,7 +125,7 @@ def test_interpolation_simple_30km(self):
Test profile interpolation: simple case | sampling: 30 km
"""
# read data and compute distances
- sps, dmin, dmax = read_profiles_csv(CS_DATA_PATH)
+ sps, dmin, dmax = read_profiles_csv(CS_DATA_PATH2, 0, 28)
lengths, longest_key, shortest_key = get_profiles_length(sps)
maximum_sampling_distance = 30
num_sampl = np.ceil(lengths[longest_key] / maximum_sampling_distance)
diff --git a/openquake/sub/tests/data/cs2/cs_3.csv b/openquake/sub/tests/data/cs2/cs_3.csv
new file mode 100644
index 000000000..fc2bf8b28
--- /dev/null
+++ b/openquake/sub/tests/data/cs2/cs_3.csv
@@ -0,0 +1,5 @@
+10.0 45.0 1.0
+10.2 45.2 3.0
+10.3 45.3 7.0
+10.5 45.5 19.0
+10.7 45.7 40.0
diff --git a/openquake/sub/tests/data/cs2/cs_4.csv b/openquake/sub/tests/data/cs2/cs_4.csv
new file mode 100644
index 000000000..5e598bd9d
--- /dev/null
+++ b/openquake/sub/tests/data/cs2/cs_4.csv
@@ -0,0 +1,5 @@
+10.1 45.0 1.6
+10.3 45.2 2.0
+10.4 45.3 5.0
+10.6 45.5 9.0
+10.8 45.7 35.0
diff --git a/openquake/sub/tests/slab/rupture_smooth_test.py b/openquake/sub/tests/slab/rupture_smooth_test.py
index c958cdc1e..e1e4553a2 100644
--- a/openquake/sub/tests/slab/rupture_smooth_test.py
+++ b/openquake/sub/tests/slab/rupture_smooth_test.py
@@ -50,6 +50,7 @@ def setUp(self):
config['main']['profile_folder'] = self.out_path
# Spatial distribution controlled by smoothing
config['main']['uniform_fraction'] = '0.0'
+ config['main']['sort_catalogue'] = 'True'
# Save the new .ini
self.ini = os.path.join(self.out_path, 'test.ini')
diff --git a/openquake/sub/utils.py b/openquake/sub/utils.py
index bbcea0970..28a07c709 100644
--- a/openquake/sub/utils.py
+++ b/openquake/sub/utils.py
@@ -34,9 +34,10 @@
from mpl_toolkits.mplot3d import Axes3D
from openquake.hazardlib.geo import Line, Point
-from openquake.hazardlib.geo.surface import ComplexFaultSurface
+from openquake.hazardlib.geo.surface import ComplexFaultSurface, KiteSurface
from openquake.hazardlib.scalerel.wc1994 import WC1994
from openquake.hazardlib.geo.utils import plane_fit
+from openquake.sub.profiles import ProfileSet
def mecclass(plungt, plungb, plungp):
@@ -225,6 +226,34 @@ def _read_edges(foldername):
tedges.append(_read_edge_file(fle))
return tedges
+def _read_pro_file(filename):
+    """
+    :parameter str filename:
+        The name of the profile file
+    :return:
+        An instance of :class:`openquake.hazardlib.geo.line.Line`
+    """
+    points = []
+    for line in open(filename, 'r'):
+        aa = re.split('\\s+', line)
+        points.append(Point(float(aa[0]),
+                            float(aa[1]),
+                            float(aa[2])))
+    return Line(points)
+
+
+def _read_profiles(foldername):
+ """
+ :parameter foldername:
+ The folder containing the `cs_*` files
+ :return:
+ A list of :class:`openquake.hazardlib.geo.line.Line` instances
+ """
+ path = os.path.join(foldername, 'cs*.*')
+ tprofiles = []
+ for fle in sorted(glob.glob(path)):
+ tprofiles.append(_read_pro_file(fle))
+ return tprofiles
+
def _get_array(tedges):
"""
@@ -327,6 +356,23 @@ def build_complex_surface_from_edges(foldername):
return surface
+def build_kite_surface_from_profiles(foldername):
+    """
+    :parameter str foldername:
+        The folder containing the `cs_*` profile files
+    :return:
+        An instance of
+        :class:`openquake.hazardlib.geo.surface.KiteSurface`
+    """
+
+    # Read the profiles
+    profiles = ProfileSet.from_files(foldername)
+
+    # Build the kite fault surface; 5 km mesh spacing along strike
+    # and down dip
+    surface = KiteSurface.from_profiles(profiles.profiles, 5, 5)
+
+    return surface
+
+
def plot_complex_surface(tedges):
"""
:parameter list tedges: