From 925756f9a28d7c56a04dc931344f657b82cdc366 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Wed, 6 Mar 2024 15:55:18 +0100
Subject: [PATCH 01/29] adding kite fault possibility to sub tools

---
 .../tools/tr/set_subduction_earthquakes.py  | 48 +++++++++++--------
 openquake/sub/create_2pt5_model.py          | 12 +++++
 openquake/sub/edges_set.py                  |  2 +
 openquake/sub/profiles.py                   | 33 +++++++++++++
 4 files changed, 76 insertions(+), 19 deletions(-)

diff --git a/openquake/mbt/tools/tr/set_subduction_earthquakes.py b/openquake/mbt/tools/tr/set_subduction_earthquakes.py
index 241534ec8..6e89e87b7 100644
--- a/openquake/mbt/tools/tr/set_subduction_earthquakes.py
+++ b/openquake/mbt/tools/tr/set_subduction_earthquakes.py
@@ -38,6 +38,7 @@
     get_distances_from_surface)
 from openquake.sub.utils import (_read_edges,
                                  build_complex_surface_from_edges,
+                                 build_kite_surface_from_profiles,
                                  plot_complex_surface)
 from openquake.hmtk.seismicity.selector import CatalogueSelector
 
@@ -140,21 +141,24 @@ def classify(self, compute_distances, remove_from):
         # Build the complex fault surface
         tedges = _read_edges(edges_folder)
         print(edges_folder)
-        surface = build_complex_surface_from_edges(edges_folder)
+#        surface = build_complex_surface_from_edges(edges_folder)
+        surface = build_kite_surface_from_profiles(edges_folder)
         mesh = surface.mesh
 
         # Create polygon encompassing the mesh
-        plo = list(mesh.lons[0, :])
-        pla = list(mesh.lats[0, :])
-        #
-        plo += list(mesh.lons[:, -1])
-        pla += list(mesh.lats[:, -1])
-        #
-        plo += list(mesh.lons[-1, ::-1])
-        pla += list(mesh.lats[-1, ::-1])
-        #
-        plo += list(mesh.lons[::-1, 0])
-        pla += list(mesh.lats[::-1, 0])
+#        plo = list(mesh.lons[0, :])
+#        pla = list(mesh.lats[0, :])
+#        #
+#        plo += list(mesh.lons[:, -1])
+#        pla += list(mesh.lats[:, -1])
+#        #
+#        plo += list(mesh.lons[-1, ::-1])
+#        pla += list(mesh.lats[-1, ::-1])
+#        #
+#        plo += list(mesh.lons[::-1, 0])
+#        pla += list(mesh.lats[::-1, 0])
+        plo = surface.surface_projection[0]
+        pla = surface.surface_projection[1]
 
         # Set variables used in griddata
         data = np.array([mesh.lons.flatten().T, mesh.lats.flatten().T]).T
@@ -167,10 +171,10 @@
         grp.create_dataset('mesh', data=ddd)
 
         # Set the bounding box of the subduction surface
-        min_lo_sub = np.amin(mesh.lons)
-        min_la_sub = np.amin(mesh.lats)
-        max_lo_sub = np.amax(mesh.lons)
-        max_la_sub = np.amax(mesh.lats)
+        min_lo_sub = np.nanmin(mesh.lons)
+        min_la_sub = np.nanmin(mesh.lats)
+        max_lo_sub = np.nanmax(mesh.lons)
+        max_la_sub = np.nanmax(mesh.lats)
 
         # Select the earthquakes within the bounding box
         idxs = sorted(list(sidx.intersection((min_lo_sub-DELTA,
@@ -261,10 +265,16 @@
 
         # Compute the depth of the top of the slab at every epicenter using
         # interpolation
-        # sub_depths = griddata(data, values, (points[:, 0], points[:, 1]),
+        from scipy.interpolate import griddata
+#        breakpoint()
+        val_red = values[~np.isnan(values)]
+        dat_red = data[~np.isnan(data)]
+        dat_red_fi = dat_red.reshape(len(val_red), 2)
+        #sub_depths = griddata(dat_red_fi, val_red, (points[:, 0], points[:, 1]),
         #                      method='cubic')
-        rbfi = RBFInterpolator(data[:, 0:2], values, kernel='multiquadric',
-                               epsilon=1)
+        #breakpoint()
+        rbfi = RBFInterpolator(dat_red_fi[:, 0:2], val_red, kernel='multiquadric',
+                               epsilon=1, neighbors=50)
         sub_depths = rbfi(points[:, 0:2])
 
         # Save the distances to a file
diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py
index a2fc691a3..4bc210703 100644
--- 
a/openquake/sub/create_2pt5_model.py +++ b/openquake/sub/create_2pt5_model.py @@ -123,6 +123,18 @@ def get_interpolated_profiles(sps, lengths, number_of_samples): # updating index idx += 1 + # add last point if it's far + if len(spro) == number_of_samples: + dist_chk = distance(spro[-1][0], spro[-1][1], spro[-1][2], + dat[-1, 0], dat[-1, 1], dat[-1, 2]) + if abs(1-dist_chk/samp) < 0.1: + spro.append([dat[-1, 0], dat[-1, 1], dat[-1, 2]]) + dchks = [] + for ii in range(len(spro)-2): + p2p_dst = distance(spro[ii][0], spro[ii][1], spro[ii][2], + spro[ii+1][0], spro[ii+1][1], spro[ii+1][2]) + dchks.append(p2p_dst.round(0)) + assert len(set(dchks)) == 1 # Saving results if len(spro): diff --git a/openquake/sub/edges_set.py b/openquake/sub/edges_set.py index ee06e715d..1ebfa336e 100644 --- a/openquake/sub/edges_set.py +++ b/openquake/sub/edges_set.py @@ -6,6 +6,7 @@ from openquake.hazardlib.geo import Line, Point from openquake.hazardlib.source import ComplexFaultSource +from openquake.hazardlib.source import KiteFaultSource from openquake.hazardlib.tom import PoissonTOM from openquake.hazardlib.const import TRT from openquake.hazardlib.mfd import TruncatedGRMFD @@ -73,3 +74,4 @@ def get_complex_fault(self, params={}, section_length=None): p['temporal_occurrence_model'], self.edges, p['rake']) + diff --git a/openquake/sub/profiles.py b/openquake/sub/profiles.py index 66523d259..f4a4925ca 100644 --- a/openquake/sub/profiles.py +++ b/openquake/sub/profiles.py @@ -9,6 +9,15 @@ from scipy import interpolate from openquake.hazardlib.geo import Line, Point +from openquake.sub.edges_set import DEFAULTS + +from openquake.hazardlib.geo import Line, Point +from openquake.hazardlib.source import ComplexFaultSource +from openquake.hazardlib.source import KiteFaultSource +from openquake.hazardlib.tom import PoissonTOM +from openquake.hazardlib.const import TRT +from openquake.hazardlib.mfd import TruncatedGRMFD +from openquake.hazardlib.scalerel.strasser2010 import StrasserInterface def _from_lines_to_array(lines): @@ -101,3 +110,27 @@ def smooth(self, method='linear'): plt.show() return grd + + +def get_kite_fault(profiles, params={}, section_length=None): + """ + :param params: + :param params: + """ + p = DEFAULTS + + # update the default parameters + for key in params: + p[key] = params[key] + + # create the kite fault source instance + return KiteFaultSource(p['source_id'], + p['name'], + p['tectonic_region_type'], + p['mfd'], + p['rupture_mesh_spacing'], + p['magnitude_scaling_relationship'], + p['rupture_aspect_ratio'], + p['temporal_occurrence_model'], + profiles.profiles, + p['rake']) From 7d2447c4e87f03baf7dcb94fe6c1a657a777ddca Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 6 Mar 2024 16:03:52 +0100 Subject: [PATCH 02/29] writing the last edge --- openquake/sub/create_2pt5_model.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py index 4bc210703..67cbac34c 100644 --- a/openquake/sub/create_2pt5_model.py +++ b/openquake/sub/create_2pt5_model.py @@ -306,11 +306,14 @@ def write_edges_csv(sps, foldername): # # run for all the edges i.e. 
number of max_num = len(sps[list(sps.keys())[0]]) - for idx in range(0, max_num - 1): + for idx in range(0, max_num ): dat = [] for key in sorted(sps): dat.append(sps[key][idx, :]) + print(sps[key][idx,:]) fname = os.path.join(foldername, 'edge_%03d.csv' % (idx)) + if idx == 12: + breakpoint() numpy.savetxt(fname, numpy.array(dat)) From 28111f98392986a03054d8ac79cc18b8aab523dc Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 7 Mar 2024 09:40:14 +0100 Subject: [PATCH 03/29] edits to fix the profile and edge generation, and starts on putting the kite faults as an option into subduction tools --- openquake/mbi/sub/plot_geometries.py | 1 + openquake/sub/create_2pt5_model.py | 19 ++----- openquake/sub/tests/create_2pt5_model_test.py | 5 +- openquake/sub/utils.py | 53 ++++++++++++++++++- 4 files changed, 60 insertions(+), 18 deletions(-) diff --git a/openquake/mbi/sub/plot_geometries.py b/openquake/mbi/sub/plot_geometries.py index cf220aba4..d3b34f3b1 100755 --- a/openquake/mbi/sub/plot_geometries.py +++ b/openquake/mbi/sub/plot_geometries.py @@ -13,6 +13,7 @@ try: import pyvista as pv + PLOT = True except: PLOT = False diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py index 67cbac34c..9ac7af76e 100644 --- a/openquake/sub/create_2pt5_model.py +++ b/openquake/sub/create_2pt5_model.py @@ -54,7 +54,8 @@ def get_interpolated_profiles(sps, lengths, number_of_samples): for key in sorted(sps.keys()): # calculate the sampling distance - samp = lengths[key] / number_of_samples + # multiplier is making the last point be closer to the original end pt + samp = lengths[key] / number_of_samples * 0.99 # set data for the profile dat = sps[key] @@ -123,18 +124,6 @@ def get_interpolated_profiles(sps, lengths, number_of_samples): # updating index idx += 1 - # add last point if it's far - if len(spro) == number_of_samples: - dist_chk = distance(spro[-1][0], spro[-1][1], spro[-1][2], - dat[-1, 0], dat[-1, 1], dat[-1, 2]) - if abs(1-dist_chk/samp) < 0.1: - spro.append([dat[-1, 0], dat[-1, 1], dat[-1, 2]]) - dchks = [] - for ii in range(len(spro)-2): - p2p_dst = distance(spro[ii][0], spro[ii][1], spro[ii][2], - spro[ii+1][0], spro[ii+1][1], spro[ii+1][2]) - dchks.append(p2p_dst.round(0)) - assert len(set(dchks)) == 1 # Saving results if len(spro): @@ -312,8 +301,8 @@ def write_edges_csv(sps, foldername): dat.append(sps[key][idx, :]) print(sps[key][idx,:]) fname = os.path.join(foldername, 'edge_%03d.csv' % (idx)) - if idx == 12: - breakpoint() + #if idx == 12: + #breakpoint() numpy.savetxt(fname, numpy.array(dat)) diff --git a/openquake/sub/tests/create_2pt5_model_test.py b/openquake/sub/tests/create_2pt5_model_test.py index b3f712b66..47f4f6484 100644 --- a/openquake/sub/tests/create_2pt5_model_test.py +++ b/openquake/sub/tests/create_2pt5_model_test.py @@ -16,6 +16,7 @@ CS_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs') +CS_DATA_PATH2 = os.path.join(os.path.dirname(__file__), 'data/cs2') CAM_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs_cam') PAISL_DATA_PATH = os.path.join(os.path.dirname(__file__), 'data/cs_paisl') @@ -54,7 +55,7 @@ def test_write_profiles_edges(self): tmp = [] for fname in glob.glob(os.path.join(self.test_dir, 'edge*')): tmp.append(fname) - self.assertEqual(len(tmp), 7) + self.assertEqual(len(tmp), 8) class ReadProfilesTest(unittest.TestCase): @@ -124,7 +125,7 @@ def test_interpolation_simple_30km(self): Test profile interpolation: simple case | sampling: 30 km """ # read data and compute distances - sps, dmin, dmax = 
read_profiles_csv(CS_DATA_PATH) + sps, dmin, dmax = read_profiles_csv(CS_DATA_PATH2, 0, 28) lengths, longest_key, shortest_key = get_profiles_length(sps) maximum_sampling_distance = 30 num_sampl = np.ceil(lengths[longest_key] / maximum_sampling_distance) diff --git a/openquake/sub/utils.py b/openquake/sub/utils.py index bbcea0970..ef3d4072e 100644 --- a/openquake/sub/utils.py +++ b/openquake/sub/utils.py @@ -34,9 +34,10 @@ from mpl_toolkits.mplot3d import Axes3D from openquake.hazardlib.geo import Line, Point -from openquake.hazardlib.geo.surface import ComplexFaultSurface +from openquake.hazardlib.geo.surface import ComplexFaultSurface, KiteSurface from openquake.hazardlib.scalerel.wc1994 import WC1994 from openquake.hazardlib.geo.utils import plane_fit +from openquake.sub.profiles import ProfileSet def mecclass(plungt, plungb, plungp): @@ -225,6 +226,34 @@ def _read_edges(foldername): tedges.append(_read_edge_file(fle)) return tedges +def _read_edge_file(filename): + """ + :parameter str filename: + The name of the edge file + :return: + An instance of :class:`openquake.hazardlib.geo.line.Line` + """ + points = [] + for line in open(filename, 'r'): + aa = re.split('\\s+', line) + points.append(Point(float(aa[0]), + float(aa[1]), + float(aa[2]))) + return Line(points) + +def _read_profiles(foldername): + """ + :parameter foldername: + The folder containing the `cs_*` files + :return: + A list of :class:`openquake.hazardlib.geo.line.Line` instances + """ + path = os.path.join(foldername, 'cs*.*') + tprofiles = [] + for fle in sorted(glob.glob(path)): + tprofiles.append(_read_pro_file(fle)) + return tprofiles + def _get_array(tedges): """ @@ -327,6 +356,28 @@ def build_complex_surface_from_edges(foldername): return surface +def build_kite_surface_from_profiles(foldername): + """ + :parameter str foldername: + The folder containing the `edge_*` files + :return: + An instance of :class:`openquake.hazardlib.geo.surface` + """ + + # Read edges + profiles = ProfileSet.from_files(foldername) + + # Kite fault source + from openquake.sub.profiles import get_kite_fault + kf_src = get_kite_fault(profiles) + surface = kf_src.surface + + # Build kite fault surface +# surface = KiteSurface.from_profiles(profiles.profiles, 5, 5) + + return surface + + def plot_complex_surface(tedges): """ :parameter list tedges: From 700ea9957449af0008dda32a7fd8a6e6f3fd0257 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Tue, 19 Mar 2024 10:57:34 +0100 Subject: [PATCH 04/29] adding routine for making and plotting many mfds from catalogues with sampled M --- openquake/mbi/cat/MFDs_sample_mag_sigma.py | 257 +++++++++++++++++++++ 1 file changed, 257 insertions(+) create mode 100644 openquake/mbi/cat/MFDs_sample_mag_sigma.py diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py new file mode 100644 index 000000000..1270e5d67 --- /dev/null +++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py @@ -0,0 +1,257 @@ +#!/usr/bin/env python + +import os +import re +import sys +import numpy as np +import pandas as pd +import pickle +import glob +import time +import toml + +from openquake.baselib import sap +from scipy.stats import truncnorm +from copy import deepcopy + +from openquake.hmtk.parsers.catalogue import CsvCatalogueParser +from openquake.mbi.ccl.decluster_multiple_TR import main as decluster_multiple_TR +from openquake.cat.completeness.generate import get_completenesses +from openquake.cat.completeness.analysis import (_completeness_analysis, + read_compl_params, + 
read_compl_data)
+
+
+def _get_truncated_normal(mean=0, sd=1, low=0, upp=10):
+    return truncnorm(
+        (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
+
+
+def _write_cat_instance(catalogue, format, fileout, cnum):
+    if format == 'hmtk':
+        catalogue.write_catalogue(fileout.format(cnum))
+    elif format == 'pickle':
+        with open(fileout.format(cnum), 'wb') as f:
+            pickle.dump(catalogue, f)
+
+
+def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random'):
+    """
+    catfi: catalogue filename (pkl or hmtk/csv)
+    stype:
+        random - samples from truncnorm
+        even - same sample for every magnitude; -1 : 1 by 0.1
+    """
+    # check that the output folder does not already exist
+    if os.path.exists(outdir):
+        tmps = f'\nError: {outdir} already exists! '
+        tmps += '\n Choose an empty directory.'
+        print(tmps)
+        sys.exit(1)
+    else:
+        os.makedirs(outdir)
+
+    csvout = os.path.join(outdir, 'v{}_' + catfi.split('/')[-1])
+    fileout = os.path.join(outdir, 'v_mags.csv')
+    factors = np.arange(-1, 1, 0.1)
+
+    # read catalogue
+    if 'pkl' in catfi:
+        format = 'pickle'
+        catalogue = pd.read_pickle(catfi)
+
+    elif ('csv' in catfi) or ('hmtk' in catfi):
+        parser = CsvCatalogueParser(catfi)  # From .csv to hmtk
+        catalogue = parser.read_file()
+        format = 'hmtk'
+
+    else:
+        print("Use a supported catalogue format.", file=sys.stderr)
+        sys.exit(1)
+
+    data = catalogue.data
+
+    if numcats and stype == 'even':
+        print("Cannot specify number of catalogues for even sampling.",
+              file=sys.stderr)
+        sys.exit(1)
+
+    if stype == 'random':
+
+        mags = []
+
+        time_strt = time.time()
+        for ii in np.arange(0, len(data['magnitude'])):
+            m = data['magnitude'][ii]
+            sd = data['sigmaMagnitude'][ii]
+            allm = _get_truncated_normal(mean=m, sd=sd, low=m-sd, upp=m+sd)
+            mags_perm = allm.rvs(size=numcats)
+            mags.append(mags_perm)
+
+        time_end = time.time()
+        print('Run time for generating magnitudes: ')
+        print(time_end - time_strt)
+
+        marr = np.array(mags)
+        np.savetxt(fileout, marr, delimiter=',', fmt='%.2f')
+
+        for ii, ms in enumerate(marr.T):
+            time_strt = time.time()
+            catalogue = deepcopy(catalogue)
+            catalogue.data['magnitude'] = np.array(ms)
+            _write_cat_instance(catalogue, format, csvout, ii)
+            time_end = time.time()
+            print('Run time for writing catalogue: ')
+            print(time_end - time_strt)
+
+    elif stype == 'even':
+        full_mags = {}
+        for jj, f in enumerate(factors):
+            mags = [f * ms + m for m, ms in zip(data['magnitude'],
+                                                data['sigmaMagnitude'])]
+            catalogue = deepcopy(catalogue)
+            catalogue.data['magnitude'] = np.array(mags)
+            _write_cat_instance(catalogue, format, csvout, jj)
+            full_mags[jj] = mags
+        pd.DataFrame(full_mags).to_csv(fileout, index=None)
+
+    else:
+        print("Use a supported sampling type.", file=sys.stderr)
+        sys.exit(1)
+
+
+def _decl_all_cats(outdir, cat, dcl_toml_tmp):
+
+    """
+    """
+    catvs = glob.glob(os.path.join(outdir, '*pkl'))
+
+    config = toml.load(dcl_toml_tmp)
+    config['main']['save_aftershocks'] = False
+    dec_outdir = 'dec_' + outdir
+    for cat in catvs:
+        config['main']['catalogue'] = cat
+        config['main']['output'] = dec_outdir
+        tmpfi = 'tmp-config-dcl.toml'
+        with open(tmpfi, 'w') as f:
+            f.write(toml.dumps(config))
+
+        decluster_multiple_TR(tmpfi)
+
+    labels = []
+    for key in config:
+        if re.search('^case', key):
+            labels.extend(config[key]['regions'])
+
+    return dec_outdir, labels
+
+
+def _gen_comple(compl_toml, dec_outdir):
+    """
+    """
+    config = toml.load(compl_toml)
+
+    cref = config['completeness'].get('completeness_ref', None)
+
+    mmin_compl = config['completeness']['min_mag_compl']
+
+    if cref == None:
+        mags = 
np.arange(4, 8.0, 0.1) + years = np.arange(1900, 2020, 5.0) + else: + mrange = config['completeness'].get('deviation', 1.0) + mags_cref = [c[1] for c in cref] + mags_min = min(mags_cref) - mrange + mags_max = max(mags_cref) + mrange + mags_lo = np.arange(mags_min, 5.8, 0.1) + mags_hi = np.arange(5.8, mags_max, 0.2) + mags_all = mags_lo.tolist() + mags_hi.tolist() + mags = list(set([round(m,2) for m in mags_all if m >= mmin_compl ])) + mags.sort() + print(mags) + + years = [c[0] for c in cref] + years.sort() + + config['completeness']['mags'] = mags + config['completeness']['years'] = years + + tmpfi = 'tmp-config-compl.toml' + + with open(tmpfi, 'w') as f: + f.write(toml.dumps(config)) + + compdir = dec_outdir.replace('dec', 'compl') + get_completenesses(tmpfi, compdir) + + return compdir, tmpfi + + +def _compl_analysis(decdir, compdir, compl_toml, labels, fout): + """ + """ + fout_figs = fout + '_figs' + ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(compl_toml) + compl_tables, mags_chk, years_chk = read_compl_data(compdir) + + # Fixing sorting of years + if np.all(np.diff(yrs)) >= 0: + yrs = np.flipud(yrs) + + for lab in labels: + dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}*')) + fout = compdir.replace('dec_', 'compl_') + f'_{lab}' + fout_figs = fout + '_figs' + for ii, cat in enumerate(dec_catvs): + try: + res = _completeness_analysis(cat, yrs, ms, bw, r_m, + r_up_m, [bmin, bmax], crit, + compl_tables, f'{ii}', + folder_out_figs=fout_figs, + folder_out=fout, + rewrite=False) + except: + print(f'Impossible for catalogue {ii}') + + return res + + +def main(catfi, outdir, dcl_toml_tmpl, compl_toml, labels=None, + numcats=10, stype='random'): +# """ +# """ +# # create versions of catalogue that are sampling the sigma + + dec_outdir = 'dec_' + outdir + compdir = dec_outdir.replace('dec', 'compl') + #tmpconf = 'tmp-config-compl.toml' + + if labels: + labs = labels.split(',') + if 0: + ncats = int(numcats) + _create_catalogue_versions(catfi, outdir, numcats=ncats, + stype='random') + + # perform all the declustering possibilities - links TRTs + if 0: + dec_outdir, labels_all = _decl_all_cats(outdir, catfi, dcl_toml_tmpl) + + if 1: + if labels == None: + labs = labels_all + + compdir, tmpconf = _gen_comple(compl_toml, dec_outdir) + + res = _compl_analysis(dec_outdir, compdir, tmpconf, labs, compdir+'_res') + +main.catfi = 'Name of the original catalogue file, pkl or hmtk format' +main.outdir = 'Directory in which to save the iterations of the catalogue' +main.dcl_toml_tmpl = 'template declustering settings file' +main.compl_toml = 'completeness analysis settings file' +main.labels = 'list of trt labels, if not all' +main.numcats = 'number of catalogues to make' +main.stype = 'random or with an increment through magnitude sigma' + +if __name__ == '__main__': + freeze_support() + sap.run(main) From 190ba5a59c363373205a4fabba3be9dec6be9c66 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Tue, 19 Mar 2024 10:58:11 +0100 Subject: [PATCH 05/29] pulling some lines into functions for more flexible use --- openquake/cat/completeness/analysis.py | 84 ++++++++++++++------------ 1 file changed, 45 insertions(+), 39 deletions(-) diff --git a/openquake/cat/completeness/analysis.py b/openquake/cat/completeness/analysis.py index 61705507e..da42c8b75 100644 --- a/openquake/cat/completeness/analysis.py +++ b/openquake/cat/completeness/analysis.py @@ -177,7 +177,8 @@ def check_criterion(criterion, rate, previous_norm, tvars): elif criterion == 'optimize': tmp_rate = -1 - norm = 
get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, info=False) + norm = get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, + last_year, info=False) elif criterion == 'optimize_a': tmp_rate = -1 @@ -309,20 +310,10 @@ def _completeness_analysis(fname, years, mags, binw, ref_mag, ref_upp_mag, print(f'Iteration: {iper:05d} norm: {norm:12.6e}', end="\r") ctab = _make_ctab(prm, years, mags) + print(ctab) if isinstance(ctab, str): continue - #tmp = [] - #for yea, j in zip(years, prm): - # if j >= -1e-10: - # tmp.append([yea, mags[int(j)]]) - #tmp = np.array(tmp) - - #if len(tmp) > 0: - # ctab = clean_completeness(tmp) - #else: - # continue - # Check compatibility between catalogue and completeness table. This # function finds in each magnitude interval defined in the completeness # table the earliest year since the occurrence of a number X of @@ -467,40 +458,30 @@ def _completeness_analysis(fname, years, mags, binw, ref_mag, ref_upp_mag, return save - -def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs, - folder_in, folder_out, skip=''): +def read_compl_params(fname_config): """ - :param fname_input_pattern: - Pattern to the files with the subcatalogues - :param fname_config: - .toml configuration file - :param folder_out_figs: - Output folder for figures - :param folder_in: - Folder with the completeness windows - :param folder_out: - Folder where to store results - :param skip: - List with the IDs of the sources to skip """ - # Loading configuration config = toml.load(fname_config) # Read parameters for completeness analysis key = 'completeness' - mags = np.array(config[key]['mags']) - years = np.array(config[key]['years']) - binw = config.get('bin_width', 0.1) - ref_mag = config[key].get('ref_mag', 5.0) - ref_upp_mag = config[key].get('ref_upp_mag', None) + ms = np.array(config[key]['mags'], dtype=float) + yrs = np.array(config[key]['years']) + bw = config.get('bin_width', 0.1) + r_m = config[key].get('ref_mag', 5.0) + r_up_m = config[key].get('ref_upp_mag', None) bmin = config[key].get('bmin', 0.8) bmax = config[key].get('bmax', 1.2) # Options: 'largest_rate', 'match_rate', 'optimize' - criterion = config[key].get('optimization_criterion', 'optimize') - print(criterion) + crit = config[key].get('optimization_criterion', 'optimize') + + return ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit + +def read_compl_data(folder_in): + """ + """ # Reading completeness data print(f'Reading completeness data from: {folder_in:s}') fname_disp = os.path.join(folder_in, 'dispositions.npy') @@ -510,12 +491,37 @@ def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs, compl_tables = {'perms': perms, 'mags_chk': mags_chk, 'years_chk': years_chk} + return compl_tables, mags_chk, years_chk + + + +def completeness_analysis(fname_input_pattern, f_config, folder_out_figs, + folder_in, folder_out, skip=''): + """ + :param fname_input_pattern: + Pattern to the files with the subcatalogues + :param fname_config: + .toml configuration file + :param folder_out_figs: + Output folder for figures + :param folder_in: + Folder with the completeness windows + :param folder_out: + Folder where to store results + :param skip: + List with the IDs of the sources to skip + """ + + ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(f_config) + + compl_tables, mags_chk, years_chk = read_compl_data(folder_in) + # Fixing sorting of years if np.all(np.diff(years)) >= 0: years = np.flipud(years) - np.testing.assert_array_equal(mags, mags_chk) - 
np.testing.assert_array_equal(years, years_chk) + np.testing.assert_array_equal(ms, mags_chk) + np.testing.assert_array_equal(yrs, years_chk) # Info if len(skip) > 0: @@ -537,8 +543,8 @@ def completeness_analysis(fname_input_pattern, fname_config, folder_out_figs, else: var = {} - res = _completeness_analysis(fname, years, mags, binw, ref_mag, - ref_upp_mag, [bmin, bmax], criterion, + res = _completeness_analysis(fname, yrs, ms, bw, r_m, + r_up_m, [bmin, bmax], crit, compl_tables, src_id, folder_out_figs=folder_out_figs, folder_out=folder_out, From f88a81b6d4f3a148624a1c840e909561c347369d Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Tue, 19 Mar 2024 10:59:04 +0100 Subject: [PATCH 06/29] small corrections and adding flexibility --- openquake/cat/completeness/generate.py | 2 +- openquake/cat/completeness/norms.py | 3 ++- openquake/mbi/ccl/decluster_multiple_TR.py | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/openquake/cat/completeness/generate.py b/openquake/cat/completeness/generate.py index f3b6dffd2..58a6bc772 100755 --- a/openquake/cat/completeness/generate.py +++ b/openquake/cat/completeness/generate.py @@ -50,7 +50,7 @@ def get_completenesses(fname_config, folder_out): create_folder(folder_out) config = toml.load(fname_config) key = 'completeness' - mags = np.array(config[key]['mags']) + mags = np.array(config[key]['mags'], dtype=np.float32) years = np.array(config[key]['years']) num_steps = config[key].get('num_steps', 0) min_mag_compl = config[key].get('min_mag_compl', None) diff --git a/openquake/cat/completeness/norms.py b/openquake/cat/completeness/norms.py index b721c047b..9c1d18fe6 100644 --- a/openquake/cat/completeness/norms.py +++ b/openquake/cat/completeness/norms.py @@ -98,7 +98,8 @@ def get_completeness_matrix(tcat, ctab, mbinw, ybinw): return oin, out, cmags, cyeas -def get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, info=False): +def get_norm_optimize(tcat, aval, bval, ctab, cmag, n_obs, t_per, last_year, + info=False): """ :param aval: GR a-value diff --git a/openquake/mbi/ccl/decluster_multiple_TR.py b/openquake/mbi/ccl/decluster_multiple_TR.py index a471260a7..c357ebc65 100755 --- a/openquake/mbi/ccl/decluster_multiple_TR.py +++ b/openquake/mbi/ccl/decluster_multiple_TR.py @@ -67,6 +67,7 @@ def main(config_fname, *, root=None): fname_cat = os.path.join(root, config['main']['catalogue']) fname_reg = os.path.join(root, config['main']['tr_file']) fname_out = os.path.join(root, config['main']['output']) + create_sc = config['main']['create_subcatalogues'] save_afrs = config['main']['save_aftershocks'] add_deflt = config['main']['catalogue_add_defaults'] @@ -97,7 +98,6 @@ def main(config_fname, *, root=None): fname_reg, create_sc, 'csv', meth[2], save_afrs, cat_lab, add_deflt) - MSG = 'Path to the configuration fname - typically a .toml file' main.config_fname = MSG MSG = 'Root folder used as a reference for paths [pwd]' From 6a9b8e1bae32f2de27e4d5ed4c78fd13f51ce778 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Tue, 19 Mar 2024 17:19:31 +0100 Subject: [PATCH 07/29] adding plots --- openquake/cat/completeness/mfd_eval_plots.py | 127 +++++++++++++++++ openquake/mbi/cat/MFDs_sample_mag_sigma.py | 140 +++++++++++-------- 2 files changed, 205 insertions(+), 62 deletions(-) create mode 100644 openquake/cat/completeness/mfd_eval_plots.py diff --git a/openquake/cat/completeness/mfd_eval_plots.py b/openquake/cat/completeness/mfd_eval_plots.py new file mode 100644 index 000000000..11774401e --- /dev/null +++ 
b/openquake/cat/completeness/mfd_eval_plots.py @@ -0,0 +1,127 @@ +import os +import pandas as pd +import numpy as np +from glob import glob +from openquake.baselib import sap +from matplotlib import pyplot as plt +from openquake.cat.completeness.analysis import read_compl_data, _make_ctab + +def _read_results(results_dir): + fils = glob(os.path.join(results_dir, 'full*')) + + # avals, bvals, weis = [], [], [] + for ii, fi in enumerate(fils): + df = pd.read_csv(fi) + idx = fi.split('_')[-1].replace('.csv', '') + df['idx'] = [idx] * len(df) + if ii==0: + df_all = df + else: + df_all = pd.concat([df, df_all]) + + return df_all.reset_index() + +def _get_best_results(df_all): + + for ii, idx in enumerate(set(df_all.idx)): + df_sub = df_all[df_all.idx == idx] + df_subB = df_sub[df_sub.norm == min(df_sub.norm)] + + if ii == 0: + df_best = df_subB + else: + df_best = pd.concat([df_subB, df_best]) + + return df_best.reset_index() + +def _make_a_b_histos(df_all, df_best, figsdir): + fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(10,5)) + binsA = np.arange(min(df_all.agr), max(df_all.agr), 0.1) + binsB = np.arange(min(df_all.bgr), max(df_all.bgr), 0.02) + num_cats = len(set(df_all.idx)) + + color = 'tab:grey' + ax[0].set_xlabel('a-value') + ax[0].set_ylabel('Count, all', color=color) + ax[0].tick_params(axis='y', labelcolor=color) + + ax[1].set_xlabel('b-value') + ax[1].set_ylabel('Count, all', color=color) + ax[1].tick_params(axis='y', labelcolor=color) + + for ii, idx in enumerate(set(df_all.idx)): + df_sub = df_all[df_all.idx == idx] + ax[0].hist(df_sub.agr, bins=binsA, color='gray', alpha=10/num_cats) + ax[1].hist(df_sub.bgr, bins=binsB, color='gray', alpha=10/num_cats) + ax2a = ax[0].twinx() + ax2b = ax[1].twinx() + ax2a.hist(df_best.agr, bins=binsA, color='red', alpha=0.2) + ax2b.hist(df_best.bgr, bins=binsB, color='red', alpha=0.2) + + color = 'tab:red' + ax2a.set_ylabel('Count, best', color=color) + ax2a.tick_params(axis='y', labelcolor=color) + ax2b.set_ylabel('Count, best', color=color) + ax2b.tick_params(axis='y', labelcolor=color) + + figname = os.path.join(figsdir, 'a-b-histos.png') + fig.savefig(figname, dpi=300) + plt.close(fig) + +def plt_compl_tables(compdir, figdir, df_best): + ctabids = df_best.id.values + compl_tables, mags_chk, years_chk = read_compl_data(compdir) + + yrs, mgs = [],[] + for cid in ctabids: + ctab = _make_ctab(compl_tables['perms'][int(cid)], years_chk, mags_chk) + + for ii in range(len(ctab)-1): + plt.plot([ctab[ii][0], ctab[ii+1][0]], [ctab[ii][1], ctab[ii][1]], 'r', alpha=0.01) + plt.plot([ctab[ii+1][0], ctab[ii+1][0]], [ctab[ii][1], ctab[ii+1][1]], 'r', alpha=0.01) + plt.plot(ctab[ii+1][0], ctab[ii][1], 'ko', alpha=0.01) + + yrs.append(ctab[ii+1][0]) + mgs.append(ctab[ii][1]) + plt.title('Completeness tables: Best results/catalogue') + plt.xlabel('Year') + plt.ylabel('Lower magnitude threshold') + fout1 = os.path.join(figdir, 'completeness_v1.png') + plt.savefig(fout1, dpi=300) + plt.close() + + plt.hist2d(yrs, mgs) + plt.colorbar(label='Count') + fout2 = os.path.join(figdir, 'completeness_v2.png') + plt.savefig(fout2, dpi=300) + plt.close() + +def plt_a_b_density(df, figsdir, figname, weights=None): + plt.hist2d(df.agr, df.bgr, bins=(10,10), weights=weights) + plt.colorbar(label='Count') + fout = os.path.join(figsdir, figname) + plt.savefig(fout, dpi=300) + plt.close() + +def get_top_percent(df_all, fraction): + min_norm = min(df_all.norm) + max_norm = max(df_all.norm) + thresh = abs(max_norm - min_norm) * fraction + df_thresh = 
df_all[df_all.norm <= min_norm + thresh] + + return df_thresh + +def make_all_plots(resdir_base, compdir, figsdir_base, labels): + for label in labels: + resdir = os.path.join(resdir_base, label) + figsdir = os.path.join(figsdir_base, label) + df_all = _read_results(resdir) + df_best = _get_best_results(df_all) + _make_a_b_histos(df_all, df_best, figsdir) + plt_compl_tables(compdir, figsdir, df_best) + plt_a_b_density(df_best, figsdir, 'a-b-density_best.png') + plt_a_b_density(df_all, figsdir, 'a-b-density_all.png') + + df_thresh = get_top_percent(df_thresh, 0.2) + plt_a_b_density(df_all, figsdir, 'a-b-density_20percent.png') + diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py index 1270e5d67..f0d7b78d3 100644 --- a/openquake/mbi/cat/MFDs_sample_mag_sigma.py +++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py @@ -20,6 +20,8 @@ from openquake.cat.completeness.analysis import (_completeness_analysis, read_compl_params, read_compl_data) +from openquake.cat.completeness.mfd_eval_plots import make_all_plots + def _get_truncated_normal(mean=0, sd=1, low=0, upp=10): return truncnorm( @@ -119,7 +121,7 @@ def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random'): -def _decl_all_cats(outdir, cat, dcl_toml_tmp): +def _decl_all_cats(outdir, cat, dcl_toml_tmp, decdir): """ """ @@ -127,10 +129,9 @@ def _decl_all_cats(outdir, cat, dcl_toml_tmp): config = toml.load(dcl_toml_tmp) config['main']['save_aftershocks'] = False - dec_outdir = 'dec_' + outdir for cat in catvs: config['main']['catalogue'] = cat - config['main']['output'] = dec_outdir + config['main']['output'] = decdir tmpfi = 'tmp-config-dcl.toml' with open(tmpfi, 'w') as f: f.write(toml.dumps(config)) @@ -142,10 +143,9 @@ def _decl_all_cats(outdir, cat, dcl_toml_tmp): if re.search('^case', key): labels.extend(config[key]['regions']) - return dec_outdir, labels -def _gen_comple(compl_toml, dec_outdir): +def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi): """ """ config = toml.load(compl_toml) @@ -162,34 +162,29 @@ def _gen_comple(compl_toml, dec_outdir): mags_cref = [c[1] for c in cref] mags_min = min(mags_cref) - mrange mags_max = max(mags_cref) + mrange - mags_lo = np.arange(mags_min, 5.8, 0.1) - mags_hi = np.arange(5.8, mags_max, 0.2) - mags_all = mags_lo.tolist() + mags_hi.tolist() + mags_all = np.arange(mags_min, mags_max, 0.2) + #mags_lo = np.arange(mags_min, 5.8, 0.1) + #mags_hi = np.arange(5.8, mags_max, 0.2) + #mags_all = mags_lo.tolist() + mags_hi.tolist() mags = list(set([round(m,2) for m in mags_all if m >= mmin_compl ])) mags.sort() print(mags) - + years = [c[0] for c in cref] years.sort() config['completeness']['mags'] = mags config['completeness']['years'] = years - - tmpfi = 'tmp-config-compl.toml' - + with open(tmpfi, 'w') as f: f.write(toml.dumps(config)) - - compdir = dec_outdir.replace('dec', 'compl') + get_completenesses(tmpfi, compdir) - - return compdir, tmpfi -def _compl_analysis(decdir, compdir, compl_toml, labels, fout): +def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs): """ """ - fout_figs = fout + '_figs' ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(compl_toml) compl_tables, mags_chk, years_chk = read_compl_data(compdir) @@ -199,59 +194,80 @@ def _compl_analysis(decdir, compdir, compl_toml, labels, fout): for lab in labels: dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}*')) - fout = compdir.replace('dec_', 'compl_') + f'_{lab}' - fout_figs = fout + '_figs' + fout_lab = os.path.join(fout, lab) + 
fout_figs_lab = os.path.join(fout_figs, lab, 'mfds') for ii, cat in enumerate(dec_catvs): try: res = _completeness_analysis(cat, yrs, ms, bw, r_m, r_up_m, [bmin, bmax], crit, compl_tables, f'{ii}', - folder_out_figs=fout_figs, - folder_out=fout, + folder_out_figs=fout_figs_lab, + folder_out=fout_lab, rewrite=False) - except: + except: print(f'Impossible for catalogue {ii}') - return res +def main(configfile): + """ + """ + + config = toml.load(configfile) -def main(catfi, outdir, dcl_toml_tmpl, compl_toml, labels=None, - numcats=10, stype='random'): -# """ -# """ -# # create versions of catalogue that are sampling the sigma - - dec_outdir = 'dec_' + outdir - compdir = dec_outdir.replace('dec', 'compl') - #tmpconf = 'tmp-config-compl.toml' - - if labels: - labs = labels.split(',') - if 0: - ncats = int(numcats) - _create_catalogue_versions(catfi, outdir, numcats=ncats, - stype='random') - - # perform all the declustering possibilities - links TRTs - if 0: - dec_outdir, labels_all = _decl_all_cats(outdir, catfi, dcl_toml_tmpl) - - if 1: - if labels == None: - labs = labels_all - - compdir, tmpconf = _gen_comple(compl_toml, dec_outdir) - - res = _compl_analysis(dec_outdir, compdir, tmpconf, labs, compdir+'_res') - -main.catfi = 'Name of the original catalogue file, pkl or hmtk format' -main.outdir = 'Directory in which to save the iterations of the catalogue' -main.dcl_toml_tmpl = 'template declustering settings file' -main.compl_toml = 'completeness analysis settings file' -main.labels = 'list of trt labels, if not all' -main.numcats = 'number of catalogues to make' -main.stype = 'random or with an increment through magnitude sigma' - -if __name__ == '__main__': + # read basic inputs + catfi = config['main']['catalogue_filename'] + outdir = config['main']['output_directory'] + labs = config['mfds'].get('labels', None) + + + # make subdirs based on outdir name + catdir = os.path.join(outdir, 'catalogues') + decdir = os.path.join(outdir, 'declustered') + compdir = os.path.join(outdir, 'completeness') + resdir = os.path.join(outdir, 'results') + figdir = os.path.join(outdir, 'figures') + tmpconf = os.path.join(outdir, 'tmp-config-compl.toml') + + + # if create_cats, make the sampled catalogues + create_cats = config['catalogues'].get('create_catalogues', True) + if create_cats: + ncats = int(config['catalogues'].get('number', 1000)) + stype = config['catalogues'].get('sampling_type', 'random') + _create_catalogue_versions(catfi, catdir, numcats=ncats, stype=stype) + + # perform all the declustering possibilities - links TRTs following configs + decluster = config['decluster'].get('decluster_catalogues', True) + if decluster: + dcl_toml_tmpl = config['decluster']['decluster_settings'] + _decl_all_cats(catdir, catfi, dcl_toml_tmpl, decdir) + + + # generate the completeness tables + generate = config['completeness'].get('generate_completeness', True) + if generate: + compl_toml = config['completeness']['completeness_settings'] + _gen_comple(compl_toml, decdir, compdir, tmpconf) + + # create the mfds for the given TRT labels + mfds = config['mfds'].get('create_mfds', True) + if mfds: + if not labs: + print('Must specify the TRTs') + sys.exit() + + _compl_analysis(decdir, compdir, tmpconf, labs, resdir, figdir) + + plots = config['plot'].get('make_plots', True) + if plots: + if not labs: + print('Must specify the TRTs') + sys.exit() + make_all_plots(resdir, compdir, figdir, labs) + + +main.configfile = 'path to configuration file' + +if __name__ == '__main__': freeze_support() sap.run(main) From 
413fa6ff47b6cfcdffaca5863ed4db7d717a6bee Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Tue, 19 Mar 2024 17:47:06 +0100
Subject: [PATCH 08/29] fixing bugs that broke tests

---
 openquake/cat/completeness/analysis.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/openquake/cat/completeness/analysis.py b/openquake/cat/completeness/analysis.py
index da42c8b75..77b61e0ff 100644
--- a/openquake/cat/completeness/analysis.py
+++ b/openquake/cat/completeness/analysis.py
@@ -458,11 +458,9 @@ def _completeness_analysis(fname, years, mags, binw, ref_mag, ref_upp_mag,
 
     return save
 
-def read_compl_params(fname_config):
+def read_compl_params(config):
     """
     """
-    # Loading configuration
-    config = toml.load(fname_config)
 
     # Read parameters for completeness analysis
     key = 'completeness'
@@ -512,16 +510,19 @@ def completeness_analysis(fname_input_pattern, f_config, folder_out_figs,
         List with the IDs of the sources to skip
     """
 
-    ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(f_config)
+    # Loading configuration
+    config = toml.load(f_config)
+
+    ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config)
 
     compl_tables, mags_chk, years_chk = read_compl_data(folder_in)
 
     # Fixing sorting of years
-    if np.all(np.diff(years)) >= 0:
-        years = np.flipud(years)
+    if np.all(np.diff(yrs)) >= 0:
+        yrs = np.flipud(yrs)
 
-    np.testing.assert_array_equal(ms, mags_chk)
-    np.testing.assert_array_equal(yrs, years_chk)
+    np.testing.assert_array_almost_equal(ms, mags_chk)
+    np.testing.assert_array_almost_equal(yrs, years_chk)
 
     # Info
     if len(skip) > 0:
@@ -537,8 +543,8 @@ def completeness_analysis(fname_input_pattern, f_config, folder_out_figs,
         else:
             var = {}
 
-        res = _completeness_analysis(fname, years, mags, binw, ref_mag,
-                                     ref_upp_mag, [bmin, bmax], criterion,
+        res = _completeness_analysis(fname, yrs, ms, bw, r_m,
+                                     r_up_m, [bmin, bmax], crit,
                                      compl_tables, src_id,
                                      folder_out_figs=folder_out_figs,
                                      folder_out=folder_out,
@@ -569,6 +570,6 @@ def completeness_analysis(fname_input_pattern, f_config, folder_out_figs,
 
     # Updating configuration
     config['sources'][src_id] = var
-    with open(fname_config, 'w', encoding='utf-8') as fou:
+    with open(f_config, 'w', encoding='utf-8') as fou:
         fou.write(toml.dumps(config))
-    print(f'Updated {fname_config:s}')
+    print(f'Updated {f_config:s}')

From 173405b102612671f60c5bcc5ffdb77dea1eae69 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Thu, 28 Mar 2024 13:20:15 +0100
Subject: [PATCH 09/29] adding more edits needed for classification and mfd
 stuff

---
 openquake/cat/completeness/generate.py             |  15 +-
 openquake/cat/completeness/mfd_eval_plots.py       | 291 ++++++++++++++++--
 openquake/mbi/cat/MFDs_sample_mag_sigma.py         |  10 +-
 openquake/mbi/ccl/change_class.py                  |  45 +++
 openquake/mbi/sub/build_complex_surface.py         |   1 +
 openquake/mbi/sub/create_2pt5_model.py             |   8 +-
 openquake/mbt/tools/model_building/dclustering.py  |  78 ++---
 .../tools/tr/set_subduction_earthquakes.py         |  38 ++-
 openquake/sub/create_2pt5_model.py                 |   9 +-
 openquake/sub/cross_sections.py                    |  62 +++-
 openquake/sub/plotting/plot_cross_section.py       | 120 +++++++-
 openquake/sub/utils.py                             |   8 +-
 12 files changed, 564 insertions(+), 121 deletions(-)
 create mode 100644 openquake/mbi/ccl/change_class.py

diff --git a/openquake/cat/completeness/generate.py b/openquake/cat/completeness/generate.py
index 58a6bc772..e98b82de2 100755
--- a/openquake/cat/completeness/generate.py
+++ b/openquake/cat/completeness/generate.py
@@ -57,14 +57,15 @@ def get_completenesses(fname_config, folder_out):
     apriori_conditions = config[key].get('apriori_conditions', {})
     cref = config[key].get('completeness_ref', None)
     step = config[key].get('step', 8)
+    mrange = config['completeness'].get('deviation', 1.0)
 
     _get_completenesses(mags, years, folder_out, num_steps, min_mag_compl,
-                        apriori_conditions, cref, step)
+                        apriori_conditions, cref, step, mrange)
 
 
 def _get_completenesses(mags, years, 
folder_out=None, num_steps=0, min_mag_compl=None, apriori_conditions={}, - completeness_ref=None, step=6): + completeness_ref=None, step=6, mrange=1.0): """ :param mags: A list or numpy array in increasing order @@ -194,13 +195,7 @@ def _get_completenesses(mags, years, folder_out=None, num_steps=0, mags_ref = [c[1] for c in completeness_ref] rem = [] for iper, prm in enumerate(perms): - #tmp = [] - #for yea, j in zip(years, prm): - # if j >= -1e-10: - # tmp.append([yea, mags[int(j)]]) -# -# tmp = np.array(tmp) -# ctab = clean_completeness(tmp) + ctab = _make_ctab(prm, years, mags) for yr, mg in ctab: if not yr in years_ref: @@ -208,7 +203,7 @@ def _get_completenesses(mags, years, folder_out=None, num_steps=0, continue index = years_ref.index(yr) mdiff = abs(mags_ref[index] - mg) - if mdiff > 1: + if mdiff > mrange: rem.append(iper) continue diff --git a/openquake/cat/completeness/mfd_eval_plots.py b/openquake/cat/completeness/mfd_eval_plots.py index 11774401e..6681d4440 100644 --- a/openquake/cat/completeness/mfd_eval_plots.py +++ b/openquake/cat/completeness/mfd_eval_plots.py @@ -5,34 +5,53 @@ from openquake.baselib import sap from matplotlib import pyplot as plt from openquake.cat.completeness.analysis import read_compl_data, _make_ctab +from openquake.hazardlib.mfd import TruncatedGRMFD +import matplotlib.colors as mcolors +from matplotlib.patches import Ellipse def _read_results(results_dir): fils = glob(os.path.join(results_dir, 'full*')) - + print(f'Total instances: { len(fils)}') # avals, bvals, weis = [], [], [] + dfs = [] for ii, fi in enumerate(fils): + if ii%50==0: + print(f'reading instance {ii}') df = pd.read_csv(fi) idx = fi.split('_')[-1].replace('.csv', '') df['idx'] = [idx] * len(df) - if ii==0: - df_all = df - else: - df_all = pd.concat([df, df_all]) + dfs.append(df) + + df_all = pd.concat(dfs, ignore_index=True) + + mags, rates = [], [] + cm_rates = [] + for ii in range(len(df_all)): + row = df_all.iloc[ii] + mags.append([float(m) for m in row.mags[1:-1].split(', ')]) + rate = [float(m) for m in row.rates[1:-1].split(', ')] + rates.append(rate) + cm_rates.append([sum(rate[ii:]) for ii in range(len(rate))]) + + df_all['mags'] = mags + df_all['rates'] = rates + df_all['cm_rates'] = cm_rates - return df_all.reset_index() + + return df_all def _get_best_results(df_all): + dfs = [] for ii, idx in enumerate(set(df_all.idx)): df_sub = df_all[df_all.idx == idx] df_subB = df_sub[df_sub.norm == min(df_sub.norm)] + + dfs.append(df_subB) - if ii == 0: - df_best = df_subB - else: - df_best = pd.concat([df_subB, df_best]) + df_best = pd.concat(dfs, ignore_index=True) - return df_best.reset_index() + return df_best def _make_a_b_histos(df_all, df_best, figsdir): fig, ax = plt.subplots(1, 2, constrained_layout=True, figsize=(10,5)) @@ -74,15 +93,23 @@ def plt_compl_tables(compdir, figdir, df_best): yrs, mgs = [],[] for cid in ctabids: - ctab = _make_ctab(compl_tables['perms'][int(cid)], years_chk, mags_chk) + ctab = _make_ctab(compl_tables['perms'][int(cid)], + years_chk, mags_chk) + # add first + plt.plot(ctab[0][0], ctab[0][1], 'ko', alpha=0.03) + plt.plot([ctab[0][0], ctab[0][0]+10], [ctab[0][1], ctab[0][1]], 'r--', alpha=0.03) + yrs.append(ctab[0][0]) + mgs.append(ctab[0][1]) + for ii in range(len(ctab)-1): - plt.plot([ctab[ii][0], ctab[ii+1][0]], [ctab[ii][1], ctab[ii][1]], 'r', alpha=0.01) - plt.plot([ctab[ii+1][0], ctab[ii+1][0]], [ctab[ii][1], ctab[ii+1][1]], 'r', alpha=0.01) - plt.plot(ctab[ii+1][0], ctab[ii][1], 'ko', alpha=0.01) - + plt.plot([ctab[ii][0], ctab[ii+1][0]], 
[ctab[ii+1][1], ctab[ii+1][1]], 'r', alpha=0.03) + plt.plot([ctab[ii][0], ctab[ii][0]], [ctab[ii][1], ctab[ii+1][1]], 'r', alpha=0.03) + plt.plot(ctab[ii+1][0], ctab[ii+1][1], 'ko', alpha=0.03) + yrs.append(ctab[ii+1][0]) - mgs.append(ctab[ii][1]) + mgs.append(ctab[ii+1][1]) + plt.title('Completeness tables: Best results/catalogue') plt.xlabel('Year') plt.ylabel('Lower magnitude threshold') @@ -96,7 +123,7 @@ def plt_compl_tables(compdir, figdir, df_best): plt.savefig(fout2, dpi=300) plt.close() -def plt_a_b_density(df, figsdir, figname, weights=None): +def plt_a_b_density(df, figsdir, figname, weights=None, density=True): plt.hist2d(df.agr, df.bgr, bins=(10,10), weights=weights) plt.colorbar(label='Count') fout = os.path.join(figsdir, figname) @@ -108,20 +135,238 @@ def get_top_percent(df_all, fraction): max_norm = max(df_all.norm) thresh = abs(max_norm - min_norm) * fraction df_thresh = df_all[df_all.norm <= min_norm + thresh] - + return df_thresh + +def plot_best_mfds(df_best, figsdir): + num = len(df_best) + for ii in range(len(df_best)): + row = df_best.iloc[ii] + mfd = TruncatedGRMFD(4, 8.5, 0.2, df_best.agr.iloc[ii], df_best.bgr.iloc[ii]) + mgrts = mfd.get_annual_occurrence_rates() + mfd_m = [m[0] for m in mgrts] + mfd_r = [m[1] for m in mgrts] + mfd_cr = [sum(mfd_r[ii:]) for ii in range(len(mfd_r))] + if ii == 0: + plt.scatter(row.mags, row.rates, marker='_', color='r', + label='Incremental occurrence') + plt.scatter(row.mags, row.cm_rates, marker='.', color='b', + label='Cumulative occurrence') + plt.semilogy(mfd_m, mfd_r, color='r', linewidth=0.1, + zorder=0, label='Incremental MFD') + plt.semilogy(mfd_m, mfd_cr, color='b', + linewidth=0.1, zorder=0, label='Cumulative MFD') + + else: + plt.scatter(row.mags, row.rates, marker='_', color='r', + alpha=10/num) + plt.scatter(row.mags, row.cm_rates, marker='.', color='b', + alpha=10/num) + plt.semilogy(mfd_m, mfd_r, color='r', alpha=30/num, linewidth=0.1, + zorder=0) + plt.semilogy(mfd_m, mfd_cr, color='b', alpha=50/num, + linewidth=0.1, zorder=0) + + plt.xlabel('Magnitude') + plt.ylabel('Annual occurrence rates') + plt.legend() + fout = os.path.join(figsdir, 'mfds_best.png') + plt.savefig(fout, dpi=300) + plt.close() + +# Function to calculate normalized histogram for a group +def norm_histo(group, field='rates', bins=10): + counts, bin_edges = np.histogram(group[field], bins=bins, density=True) + # Normalize counts to ensure the area under the histogram equals 1 + bin_widths = np.diff(bin_edges) + normalized_counts = counts * bin_widths + bin_centers = [0.5*(bin_edges[ii] + bin_edges[ii+1]) for ii in range(len(bin_edges)-1)] + alpha = (normalized_counts - min(normalized_counts)) / (max(normalized_counts) - min(normalized_counts)) + + return bin_centers, alpha + +def weighted_mean(values, weights): + return np.sum(values * weights) / np.sum(weights) + +def weighted_covariance(x, y, weights): + mean_x = weighted_mean(x, weights) + mean_y = weighted_mean(y, weights) + cov_xy = np.sum(weights * (x - mean_x) * (y - mean_y)) / np.sum(weights) + cov_xx = np.sum(weights * (x - mean_x)**2) / np.sum(weights) + cov_yy = np.sum(weights * (y - mean_y)**2) / np.sum(weights) + return np.array([[cov_xx, cov_xy], [cov_xy, cov_yy]]) + + +def plot_weighted_covariance_ellipse(df, figdir, n_std=1.0, + figname='a-b-covariance.png'): + + # set up data + x = df.agr + y = df.bgr + wei = 1-df.norm + weights = (wei - min(wei)) / (max(wei) - min(wei)) + + # set up plot + fig, ax = plt.subplots() + hb = ax.hexbin(x, y, gridsize=20, cmap='Blues') + cb = 
fig.colorbar(hb, ax=ax) + cb.set_label('counts') + + # get covariance + cov_matrix = weighted_covariance(x, y, weights) + eigenvalues, eigenvectors = np.linalg.eigh(cov_matrix) + + # Sort the eigenvalues and eigenvectors + order = eigenvalues.argsort()[::-1] + eigenvalues, eigenvectors = eigenvalues[order], eigenvectors[:, order] + + # Get the index of the largest eigenvalue + largest_eigvec = eigenvectors[:, 0] + + angle = np.degrees(np.arctan2(largest_eigvec[1], largest_eigvec[0])) + width, height = 2 * n_std * np.sqrt(eigenvalues) + + ellipse = Ellipse(xy=(weighted_mean(x, weights), weighted_mean(y, weights)), + width=width, height=height, angle=angle, + facecolor='none', edgecolor='red') + + ax.add_patch(ellipse) + + angle_rad = np.radians(angle) # angle computed during the ellipse plotting + + center_x = weighted_mean(x, weights) + center_y = weighted_mean(y, weights) + + a = np.sqrt(eigenvalues[0]) # Length of semi-major axis + b = np.sqrt(eigenvalues[1]) # Length of semi-minor axis + + # For the semi-major axis (a) + major_x1 = center_x + a * np.cos(angle_rad) + major_y1 = center_y + a * np.sin(angle_rad) + + major_x2 = center_x - a * np.cos(angle_rad) + major_y2 = center_y - a * np.sin(angle_rad) + + + ax.scatter(center_x, center_y, c='white', marker='s', edgecolors='red') + ax.scatter(major_x1, major_y1, c='white', marker='o', edgecolors='red') + ax.scatter(major_x2, major_y2, c='white', marker='o', edgecolors='red') + + color = 'red' + ax.text(0.02, 0.98, f'a = {np.round(major_x1, 3)}, b = {np.round(major_y1, 3)}', + transform=ax.transAxes, verticalalignment='top', horizontalalignment='left', + fontsize=12, color=color) + ax.text(0.02, 0.94, f'a = {np.round(center_x, 3)}, b = {np.round(center_y, 3)}', + transform=ax.transAxes, verticalalignment='top', horizontalalignment='left', + fontsize=12, color=color) + ax.text(0.02, 0.90, f'a = {np.round(major_x2, 3)}, b = {np.round(major_y2, 3)}', + transform=ax.transAxes, verticalalignment='top', horizontalalignment='left', + fontsize=12, color=color) + + + ax.set_xlabel('a-value', fontsize=12) + ax.set_ylabel('b-value', fontsize=12) + ax.set_title(figname.replace('.png', '')) + + fout = os.path.join(figdir, figname) + plt.savefig(fout, dpi=300) + plt.close() + + + +def plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=10, bw=0.2, figname=None): +# Group the DataFrame by the 'Category' column and apply the histogram calculation function + + fl_mags = [item for sublist in df_all.mags.values for item in sublist] + fl_rates = [item for sublist in df_all.rates.values for item in sublist] + fl_crates = [item for sublist in df_all.cm_rates.values for item in sublist] + fl_df = pd.DataFrame({'mags': fl_mags, 'rates': fl_rates, 'cm_rates': fl_crates}) + + grouped = fl_df.groupby('mags') + hist_data = grouped.apply(lambda g: norm_histo(g, field=field, bins=bins)) + mags = hist_data._mgr.axes[0].values + results = hist_data.values + + all_alpha = [] + for mag, rat in zip(mags, results): + m = [mag] * len(rat[0]) + nmc = rat[1] + all_alpha.extend(nmc) + alph_min = min(all_alpha) + alph_max = max(all_alpha) + + norm = mcolors.Normalize(vmin=alph_min, vmax=alph_max) + + # Choose a colormap + colormap = plt.cm.Purples + + for mag, rat in zip(mags, results): + m = [mag] * len(rat[0]) + alph = rat[1] + alph[alph<0.1]=0.2 + plt.semilogy([m[0], m[0]], [min(rat[0]), max(rat[0])], c='gray', linewidth=1, zorder=1) + plt.scatter(m, rat[0], 250, marker='_', c=alph, cmap=colormap, norm=norm, zorder=0) + + for index, row in df_best.iterrows(): + 
plt.scatter(row['mags'], row[field], 2, 'k', marker='s') + mfd = TruncatedGRMFD(min(mags)-bw, 8.5, bw, row.agr, row.bgr) + mgrts = mfd.get_annual_occurrence_rates() + mfd_m = [m[0] for m in mgrts] + mfd_r = [m[1] for m in mgrts] + if field == 'cm_rates': + mfd_cr = [sum(mfd_r[ii:]) for ii in range(len(mfd_r))] + plt.semilogy(mfd_m, mfd_cr, color='maroon', linewidth=0.2, zorder=10, alpha=30/len(df_best)) + else: + plt.semilogy(mfd_m, mfd_r, color='maroon', linewidth=0.2, zorder=10, alpha=30/len(df_best)) + + if figname==None: + figname = f'mfds_all_{field}.png' + + fout = os.path.join(figsdir, figname) + + + plt.xlabel('Magnitude') + plt.ylabel('annual occurrence rate') + plt.title(figname.replace('.png', '')) + plt.savefig(fout) + plt.close() + + def make_all_plots(resdir_base, compdir, figsdir_base, labels): for label in labels: + print(f'Running for {label}') resdir = os.path.join(resdir_base, label) figsdir = os.path.join(figsdir_base, label) + print('getting all results') df_all = _read_results(resdir) + print('getting best results') df_best = _get_best_results(df_all) + print('making histograms') _make_a_b_histos(df_all, df_best, figsdir) + print('plotting completeness') plt_compl_tables(compdir, figsdir, df_best) + print('plotting 2d histo') plt_a_b_density(df_best, figsdir, 'a-b-density_best.png') plt_a_b_density(df_all, figsdir, 'a-b-density_all.png') - - df_thresh = get_top_percent(df_thresh, 0.2) - plt_a_b_density(df_all, figsdir, 'a-b-density_20percent.png') - + df_thresh = get_top_percent(df_all, 0.2) + plt_a_b_density(df_thresh, figsdir, 'a-b-density_20percent.png') + nm = df_thresh.norm + nm_weight = (nm- min(nm))/(max(nm)-min(nm)) + plt_a_b_density(df_thresh, figsdir, 'a-b-density_20percent_w.png', + weights = nm_weight) + print('plotting mfds') + plot_best_mfds(df_best, figsdir) + plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=60) + plot_all_mfds(df_all, df_best, figsdir, field='cm_rates', bins=60) + plot_all_mfds(df_best, df_best, figsdir, field='rates', bins=60, + figname='mfds_best_rates.png') + plot_all_mfds(df_best, df_best, figsdir, field='cm_rates', bins=60, + figname='mfds_best_cmrates.png') + plot_all_mfds(df_all, df_thresh, figsdir, field='rates', bins=60, + figname='mfds_thresh_rates.png') + plot_all_mfds(df_all, df_thresh, figsdir, field='cm_rates', bins=60, + figname='mfds_thresh_cmrates.png') + print('plotting covariance') + plot_weighted_covariance_ellipse(df_best, figsdir) + plot_weighted_covariance_ellipse(df_thresh, figsdir, figname=f'{label}-20percent.png') diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py index f0d7b78d3..845856148 100644 --- a/openquake/mbi/cat/MFDs_sample_mag_sigma.py +++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py @@ -163,9 +163,6 @@ def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi): mags_min = min(mags_cref) - mrange mags_max = max(mags_cref) + mrange mags_all = np.arange(mags_min, mags_max, 0.2) - #mags_lo = np.arange(mags_min, 5.8, 0.1) - #mags_hi = np.arange(5.8, mags_max, 0.2) - #mags_all = mags_lo.tolist() + mags_hi.tolist() mags = list(set([round(m,2) for m in mags_all if m >= mmin_compl ])) mags.sort() print(mags) @@ -185,7 +182,10 @@ def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi): def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs): """ """ - ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(compl_toml) + # load configs + config = toml.load(compl_toml) + + ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = 
read_compl_params(config) compl_tables, mags_chk, years_chk = read_compl_data(compdir) # Fixing sorting of years @@ -193,7 +193,7 @@ def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs): yrs = np.flipud(yrs) for lab in labels: - dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}*')) + dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}.csv')) fout_lab = os.path.join(fout, lab) fout_figs_lab = os.path.join(fout_figs, lab, 'mfds') for ii, cat in enumerate(dec_catvs): diff --git a/openquake/mbi/ccl/change_class.py b/openquake/mbi/ccl/change_class.py new file mode 100644 index 000000000..a98d67f87 --- /dev/null +++ b/openquake/mbi/ccl/change_class.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# ------------------- The OpenQuake Model Building Toolkit -------------------- +# Copyright (C) 2022 GEM Foundation +# _______ _______ __ __ _______ _______ ___ _ +# | || | | |_| || _ || || | | | +# | _ || _ | ____ | || |_| ||_ _|| |_| | +# | | | || | | ||____|| || | | | | _| +# | |_| || |_| | | || _ | | | | |_ +# | || | | ||_|| || |_| | | | | _ | +# |_______||____||_| |_| |_||_______| |___| |___| |_| +# +# This program is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +# details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . +# ----------------------------------------------------------------------------- +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# coding: utf-8 + +from openquake.mbt.tools.tr.change_class import change + +from openquake.baselib import sap + +def main(cat_pickle_filename, treg, eqlist): + + change(cat_pickle_filename, treg, eqlist) + + +msg = 'Pickled catalogue file' +main.cat_pickle_filename = msg +msg = 'hdf5 file with classifications' +main.treg = msg +msg = 'File of format ,' +main.eqlist = msg + +if __name__ == "__main__": + sap.run(main) diff --git a/openquake/mbi/sub/build_complex_surface.py b/openquake/mbi/sub/build_complex_surface.py index 0ed8c2382..5463cd29d 100755 --- a/openquake/mbi/sub/build_complex_surface.py +++ b/openquake/mbi/sub/build_complex_surface.py @@ -2,6 +2,7 @@ # coding: utf-8 import os +import sys from openquake.baselib import sap from openquake.sub.build_complex_surface import build_complex_surface diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py index 148701ff2..719712ad0 100755 --- a/openquake/mbi/sub/create_2pt5_model.py +++ b/openquake/mbi/sub/create_2pt5_model.py @@ -33,7 +33,8 @@ from openquake.hazardlib.geo.geodetic import distance -def main(in_path, out_path, *, maximum_sampling_distance=25.): +def main(in_path, out_path, *, maximum_sampling_distance=25., + start=None, end=None): """ From a set of profiles it creates the top surface of the slab """ @@ -45,11 +46,14 @@ def main(in_path, out_path, *, maximum_sampling_distance=25.): print(tmps) exit(0) - create_2pt5_model(in_path, out_path, float(maximum_sampling_distance)) + create_2pt5_model(in_path, out_path, float(maximum_sampling_distance), + start, end) main.in_path = 'Folder with the profiles' main.out_path = 'Folder where to 
store the output' main.maximum_sampling_distance = 'Sampling distance [km]' +main.start = 'id of first profile to include' +main.end = 'id of last profile to include' if __name__ == "__main__": main(sys.argv[1:]) diff --git a/openquake/mbt/tools/model_building/dclustering.py b/openquake/mbt/tools/model_building/dclustering.py index 923d34019..31ce24b0a 100755 --- a/openquake/mbt/tools/model_building/dclustering.py +++ b/openquake/mbt/tools/model_building/dclustering.py @@ -62,9 +62,9 @@ def dec(declustering_params, declustering_meth, cat): def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params, - output_path, labels=None, tr_fname=None, subcatalogues=False, - fmat='csv', olab='', save_af=False, out_fname_ext='', - fix_defaults=False): + output_path, labels=None, tr_fname=None, + subcatalogues=False, fmat='csv', olab='', save_af=False, + out_fname_ext='', fix_defaults=False): """ :param str catalogue_hmtk_fname: Full path to the file containing the initial catalogue @@ -78,6 +78,8 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params, It can be a string or a list of strings :param str tr_fname: An .hdf5 file containing the TR classification of the catalogue + :param array mag_perm: + An array, with the same length as the catalogue, that replaces the magnitudes :param bool subcatalogues: When true creates subcatalogues per tectonic region :param str fmat: @@ -124,7 +126,7 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params, cat = _add_defaults(cat) cato = copy.deepcopy(cat) - # Select earthquakes belonging to a given TR. When necessary combining + # Select earthquakes belonging to a given TR. When necessary combining # multiple TRs, use label ,AND... idx = numpy.full(cat.data['magnitude'].shape, True, dtype=bool) sumchk = 0 @@ -216,40 +218,44 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params, tmpsum1 = int(sum(idx_tmp)) tmpsum2 = int(sum(kkk)) logging.info(tmps.format(lab, tmpsum1, tmpsum2, pct)) - print(tmps.format(lab, tmpsum1, tmpsum2, pct)) - print(tmpr.format(min(ooo.data['magnitude']), + try: + print(tmps.format(lab, tmpsum1, tmpsum2, pct)) + print(tmpr.format(min(ooo.data['magnitude']), max(ooo.data['magnitude']))) - # - # Output filename - ext = '_dec_{:s}_{:s}.{:s}'.format(olab, lab, fmat) - tcat_fname = Path(os.path.basename(catalogue_hmtk_fname)).stem - tcat_fname = tcat_fname + ext - tmps = os.path.join(output_path, tcat_fname) - tcat_fname = os.path.abspath(tmps) - if save_af: - ext = '_dec_af_{:s}_{:s}.{:s}'.format(olab, lab, fmat) - tcataf_fname = Path( - os.path.basename(catalogue_hmtk_fname)).stem + ext - tmps = os.path.join(output_path, tcataf_fname) - tcataf_fname = os.path.abspath(tmps) - # - # Dumping data into the pickle file - if ooo is not None: - if fmat == 'csv': - ooo.write_catalogue(tcat_fname) - if save_af: - aaa.write_catalogue(tcataf_fname) - elif fmat == 'pkl': - fou = open(tcat_fname, 'wb') - pickle.dump(ooo, fou) - fou.close() - if save_af: - fou = open(tcataf_fname, 'wb') - pickle.dump(aaa, fou) + # + # Output filename + ext = '_dec_{:s}_{:s}.{:s}'.format(olab, lab, fmat) + tcat_fname = Path(os.path.basename(catalogue_hmtk_fname)).stem + tcat_fname = tcat_fname + ext + tmps = os.path.join(output_path, tcat_fname) + tcat_fname = os.path.abspath(tmps) + if save_af: + ext = '_dec_af_{:s}_{:s}.{:s}'.format(olab, lab, fmat) + tcataf_fname = Path( + os.path.basename(catalogue_hmtk_fname)).stem + ext + tmps = os.path.join(output_path, tcataf_fname) + tcataf_fname = 
os.path.abspath(tmps) + # + # Dumping data into the pickle file + if ooo is not None: + if fmat == 'csv': + ooo.write_catalogue(tcat_fname) + if save_af: + aaa.write_catalogue(tcataf_fname) + elif fmat == 'pkl': + fou = open(tcat_fname, 'wb') + pickle.dump(ooo, fou) fou.close() + if save_af: + fou = open(tcataf_fname, 'wb') + pickle.dump(aaa, fou) + fou.close() + else: + tstr = 'Catalogue for region {:s} is empty'.format(lab) + logging.warning(tstr) + except Exception: + continue + f.close() outl = [out_fname] diff --git a/openquake/mbt/tools/tr/set_subduction_earthquakes.py b/openquake/mbt/tools/tr/set_subduction_earthquakes.py index 6e89e87b7..a04eda3df 100644 --- a/openquake/mbt/tools/tr/set_subduction_earthquakes.py +++ b/openquake/mbt/tools/tr/set_subduction_earthquakes.py @@ -146,17 +146,17 @@ def classify(self, compute_distances, remove_from): mesh = surface.mesh # Create polygon encompassing the mesh -# plo = list(mesh.lons[0, :]) -# pla = list(mesh.lats[0, :]) -# # -# plo += list(mesh.lons[:, -1]) -# pla += list(mesh.lats[:, -1]) -# # -# plo += list(mesh.lons[-1, ::-1]) -# pla += list(mesh.lats[-1, ::-1]) -# # -# plo += list(mesh.lons[::-1, 0]) -# pla += list(mesh.lats[::-1, 0]) + #plo = list(mesh.lons[0, :]) + #pla = list(mesh.lats[0, :]) + # + #plo += list(mesh.lons[:, -1]) + #pla += list(mesh.lats[:, -1]) + # + #plo += list(mesh.lons[-1, ::-1]) + #pla += list(mesh.lats[-1, ::-1]) + # + #plo += list(mesh.lons[::-1, 0]) + #pla += list(mesh.lats[::-1, 0]) plo = surface.surface_projection[0] pla = surface.surface_projection[1] @@ -265,16 +265,14 @@ def classify(self, compute_distances, remove_from): # Compute the depth of the top of the slab at every epicenter using # interpolation - from scipy.interpolate import griddata -# breakpoint() - val_red = values[~np.isnan(values)] - dat_red = data[~np.isnan(data)] - dat_red_fi = dat_red.reshape(len(val_red), 2) - #sub_depths = griddata(dat_red_fi, val_red, (points[:, 0], points[:, 1]), + # sub_depths = griddata(data, values, (points[:, 0], points[:, 1]), # method='cubic') - #breakpoint() - rbfi = RBFInterpolator(dat_red_fi[:, 0:2], val_red, kernel='multiquadric', - epsilon=1, neighbors=50) + val_red = values[~np.isnan(values)] + dat_red = data[~np.isnan(data)].reshape(-1, 2) +# dat_red_fi = dat_red.reshape(len(val_red), 2) + + rbfi = RBFInterpolator(dat_red[:, 0:2], val_red, kernel='multiquadric', + epsilon=1, neighbors=100) sub_depths = rbfi(points[:, 0:2]) # Save the distances to a file diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py index 9ac7af76e..c6a4947be 100644 --- a/openquake/sub/create_2pt5_model.py +++ b/openquake/sub/create_2pt5_model.py @@ -1,6 +1,7 @@ #!/usr/bin/env python import os import re +import sys import glob import numpy @@ -299,14 +300,12 @@ def write_edges_csv(sps, foldername): dat = [] for key in sorted(sps): dat.append(sps[key][idx, :]) - print(sps[key][idx,:]) fname = os.path.join(foldername, 'edge_%03d.csv' % (idx)) - #if idx == 12: - #breakpoint() numpy.savetxt(fname, numpy.array(dat)) -def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25.): +def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25., + from_id=".*", to_id=".*"): """ :param in_path: Folder name with profiles @@ -325,7 +324,7 @@ def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25., exit(0) # Read profiles - sps, dmin, dmax = read_profiles_csv(in_path) + sps, dmin, dmax = 
read_profiles_csv(in_path, from_id, to_id) # Compute lengths lengths, longest_key, shortest_key = get_profiles_length(sps) diff --git a/openquake/sub/cross_sections.py b/openquake/sub/cross_sections.py index 5728c2d19..a98d6fde7 100644 --- a/openquake/sub/cross_sections.py +++ b/openquake/sub/cross_sections.py @@ -48,6 +48,7 @@ from scipy.interpolate import LinearNDInterpolator from openquake.hmtk.seismicity.selector import CatalogueSelector +from openquake.hmtk.parsers.catalogue.csv_catalogue_parser import CsvCatalogueParser from openquake.hmtk.parsers.catalogue.gcmt_ndk_parser import ParseNDKtoGCMT @@ -178,6 +179,9 @@ def __init__(self, cross_section): self.topo = None self.litho = None self.volc = None + self.cs = None + self.c_eqks = None + self.count = [0]*4 # events per class: [crustal, interface, slab, unclassified] def set_trench_axis(self, filename): """ @@ -215,6 +219,58 @@ def set_catalogue(self, catalogue, bffer=75.): newcat = selector.select_catalogue(boo) self.ecat = newcat + + def set_catalogue_classified(self, classes, classlist, bffer=75.): + """ + :param classes: Template path to the classified subcatalogues, expanded with each class name (a '{:s}' placeholder) + :param classlist: Comma-separated string with the class names + :param bffer: Buffer distance [km] around the cross-section + """ + print('setting catalogue') + + types = classlist.split(',') + datal = [] + cla = [] + for file_n in types: + filen = os.path.join(classes.format(file_n)) + print(filen) + parser = CsvCatalogueParser(filen) + catalogueA = parser.read_file() + sel1 = CatalogueSelector(catalogueA, create_copy=True) + catalogue = sel1.within_magnitude_range(lower_mag=None, upper_mag=None) + print(len(catalogue.data['depth'])) + _, _, _, _, qual = self.csec.get_mm() + if qual == 1: + idxs = self.csec.get_eqks_within_buffer_idl(catalogue, bffer) + else: + idxs = self.csec.get_eqks_within_buffer(catalogue, bffer) + boo = numpy.zeros_like(catalogue.data['magnitude'], dtype=int) + boo[idxs] = 1 + selector = CatalogueSelector(catalogue, create_copy=True) + newcat = selector.select_catalogue(boo) + lon = newcat.data['longitude'] + lon = [x + 360 if x < 0 else x for x in lon] + lat = newcat.data['latitude'] + depth = newcat.data['depth'] + mag = newcat.data['magnitude'] + cl_len = len(lat) + if 'crustal' in filen.lower(): + cla = [1]*cl_len + self.count[0] = cl_len + if 'int' in filen.lower(): + cla = [2]*cl_len + self.count[1] = cl_len + if 'slab' in filen.lower(): + cla = [3]*cl_len + self.count[2] = cl_len + if 'unc' in filen.lower(): + cla = [4]*cl_len + self.count[3] = cl_len + for g in range(len(lat)): + datal.append([lon[g], lat[g], depth[g], cla[g], mag[g]]) + + dataa = numpy.array(datal) + if len(cla): + self.c_eqks = numpy.squeeze(dataa[:, :]) + + def set_slab1pt0(self, filename, bffer=2.0): """ :parameter filename: @@ -333,19 +389,19 @@ def set_gcmt(self, filename, gcmt_mag=0.0, bffer=75.): minlo, maxlo, minla, maxla, qual = self.csec.get_mm() if qual == 0: - idxs = self.csec.get_grd_nodes_within_buffer(loc, + idxs, _ = self.csec.get_grd_nodes_within_buffer(loc, lac, bffer, minlo, maxlo, minla, maxla) if qual == 1: - idxs = self.csec.get_grd_nodes_within_buffer_idl(loc, + idxs, _ = self.csec.get_grd_nodes_within_buffer_idl(loc, lac, bffer, minlo, maxlo, minla, maxla) if idxs is not None: - cmt_cat.select_catalogue_events(idxs[0]) + cmt_cat.select_catalogue_events(idxs) self.gcmt = cmt_cat def set_topo(self, filename, bffer=0.25): diff --git a/openquake/sub/plotting/plot_cross_section.py b/openquake/sub/plotting/plot_cross_section.py index 634663ee1..1f73bf416 100755 --- a/openquake/sub/plotting/plot_cross_section.py +++ b/openquake/sub/plotting/plot_cross_section.py @@ -1,7 +1,9 @@ #!/usr/bin/env python +import os import re 
import sys +import h5py import pickle import numpy import matplotlib import matplotlib.pyplot as plt @@ -12,7 +14,7 @@ from obspy.imaging.beachball import beach from matplotlib.backend_bases import KeyEvent -from matplotlib.patches import (Circle, Rectangle) +from matplotlib.patches import (Circle, Rectangle, Ellipse) from matplotlib.collections import PatchCollection from openquake.sub.cross_sections import (CrossSection, CrossSectionData) @@ -36,6 +38,11 @@ 'R-SS': 'goldenrod', 'SS-R': 'yellow'} +CLASSIFICATION = {'interface': 'blue', + 'crustal': 'purple', + 'slab': 'aquamarine', + 'unclassified': 'yellow'} + def onclick(event): print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' % @@ -52,11 +59,15 @@ def _plot_h_eqk_histogram(axes, csda, dep_max=[], dis_max=[]): :param dep_max: :param dis_max: """ - if csda.ecat is None: + if (csda.ecat is None) and (csda.c_eqks is None): return plt.axes(axes) - newcat = csda.ecat + + if csda.ecat is not None: + newcat = csda.ecat + else: + newcat = csda.c_eqks olo = csda.csec.olo ola = csda.csec.ola @@ -124,7 +135,7 @@ def _plot_v_eqk_histogram(axes, csda, dep_max=[], dis_max=[]): :param dis_max: """ - if csda.ecat is None: + if (csda.ecat is None) and (csda.c_eqks is None): return plt.axes(axes) @@ -394,6 +405,37 @@ def _plot_eqks(axes, csda): axes.add_collection(p) +def _plot_c_eqks(axes, csda): + """ + :parameter axes: + :parameter csda: + """ + + classcat = csda.c_eqks + + olo = csda.csec.olo + ola = csda.csec.ola + dsts = geodetic_distance(olo, ola, + classcat[:, 0], + classcat[:, 1]) + patches = [] + for dst, dep, cls, mag in zip(dsts, + classcat[:, 2], + classcat[:, 3], + classcat[:, 4]): + circle = Circle((dst, dep), (mag*0.5)**1.5, ec='white') + patches.append(circle) + colors = classcat[:, 3] + p = PatchCollection(patches, zorder=0, edgecolors='white') + p.set_alpha(0.6) + p.set_array(numpy.array(colors)) + p.set_clim([1, 4]) + axes.add_collection(p) + + + def _print_legend(axes, depp, lnght): x = int(lnght / 2.) 
xstep = 40 @@ -418,7 +460,20 @@ axes.add_patch(box) -def _print_info(axes, csec, depp): +def _print_legend2(axes, depp, lnght): + x = 7 + ystep = 11 + y = depp - 49 + for key in sorted(CLASSIFICATION): + box = matplotlib.patches.Ellipse(xy=(x, y), width=8, height=8, + color=CLASSIFICATION[key], clip_on=False) + y += ystep + box.set_alpha(0.5) + axes.add_patch(box) + + +def _print_info(axes, csec, depp, count): """ """ plt.axes(axes) @@ -430,12 +485,33 @@ def _print_info(axes, csec, depp): axes.annotate(note, xy=(0.0, depp+30), xycoords='data', annotation_clip=False, fontsize=8) - note = 'Cross-Section lenght: %.1f [km]' % (csec.length[0]) + note = 'Cross-Section length: %.1f [km]' % (csec.length[0]) plt.gca().annotate(note, xy=(0.0, depp+40), xycoords='data', annotation_clip=False, fontsize=8) + ystep = 11 + xloc = 17 + note = 'Classification:' + plt.gca().annotate(note, xy=(4, depp-5*ystep), xycoords='data', + annotation_clip=False, fontsize=8) + note = 'Crustal: %d' % (count[0]) + plt.gca().annotate(note, xy=(xloc, depp-4*ystep), xycoords='data', + annotation_clip=False, fontsize=8) + + note = 'Interface: %d' % (count[1]) + plt.gca().annotate(note, xy=(xloc, depp-3*ystep), xycoords='data', + annotation_clip=False, fontsize=8) + + note = 'Slab: %d' % (count[2]) + plt.gca().annotate(note, xy=(xloc, depp-2*ystep), xycoords='data', + annotation_clip=False, fontsize=8) + + note = 'Unclassified: %d' % (count[3]) + plt.gca().annotate(note, xy=(xloc, depp-1*ystep), xycoords='data', + annotation_clip=False, fontsize=8) + -def plot(csda, depp, lnght): +def plot(csda, depp, lnght, plottype): """ """ # Computing figure width @@ -458,11 +534,18 @@ def plot(csda, depp, lnght): plt.axes(ax3) ax3.xaxis.tick_top() - _print_info(ax3, csda.csec, depp) + _print_info(ax3, csda.csec, depp, csda.count) _print_legend(ax3, depp, lnght) + _print_legend2(ax3, depp, lnght) # Plotting - _plot_eqks(ax3, csda) +# _plot_eqks(ax3, csda) + if 'classification' in plottype: + _plot_c_eqks(plt.subplot(gs[3]), csda) + else: + _plot_eqks(plt.subplot(gs[3]), csda) + _plot_h_eqk_histogram(plt.subplot(gs[1]), csda, depp, lnght) + + _plot_v_eqk_histogram(plt.subplot(gs[2]), csda, depp, lnght) _plot_moho(ax3, csda) _plot_litho(ax3, csda) _plot_topo(ax3, csda) @@ -472,8 +555,6 @@ def plot(csda, depp, lnght): _plot_focal_mech(ax3, csda) _plot_slab1pt0(ax3, csda) _plot_np_intersection(ax3, csda) - _plot_h_eqk_histogram(ax1, csda, depp, lnght) - _plot_v_eqk_histogram(ax2, csda, depp, lnght) # Main panel plt.axes(ax3) @@ -567,9 +648,22 @@ def plt_cs(olo, ola, depp, lnght, strike, ids, ini_filename): try: fname_trench = config['data']['trench_axis_filename'] csda.set_trench_axis(fname_trench) - except: + except Exception: fname_trench = None fname_eqk_cat = config['data']['catalogue_pickle_filename'] + + plottype = '' + if config.has_option('general', 'type'): + plottype = config['general']['type'] + if 'classification' in plottype: + fname_class = config['data']['class_base'] + fname_classlist = config['data']['class_list'] + csda.set_catalogue_classified(fname_class, fname_classlist) + else: + fname_eqk_cat = config['data']['catalogue_pickle_filename'] + cat = pickle.load(open(fname_eqk_cat, 'rb')) + csda.set_catalogue(cat, 75.) 
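+ # Illustrative .ini fragment for the classification branch (the option names match the lookups above; the values are examples, not requirements): + # [general] + # type = classification + # [data] + # class_base = classified_{:s}.csv ; template expanded with each class name + # class_list = crustal,int,slab,unc ; substrings matched in set_catalogue_classified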
+ fname_slab = config['data']['slab1pt0_filename'] fname_crust = config['data']['crust1pt0_filename'] fname_gcmt = config['data']['gcmt_filename'] @@ -594,7 +688,7 @@ def plt_cs(olo, ola, depp, lnght, strike, ids, ini_filename): csda.set_litho_moho_depth(fname_litho) csda.set_volcano(fname_volc) - fig = plot(csda, depp, lnght) + fig = plot(csda, depp, lnght, plottype) return fig diff --git a/openquake/sub/utils.py b/openquake/sub/utils.py index ef3d4072e..3667f8dcc 100644 --- a/openquake/sub/utils.py +++ b/openquake/sub/utils.py @@ -368,12 +368,12 @@ def build_kite_surface_from_profiles(foldername): profiles = ProfileSet.from_files(foldername) # Kite fault source - from openquake.sub.profiles import get_kite_fault - kf_src = get_kite_fault(profiles) - surface = kf_src.surface + #from openquake.sub.profiles import get_kite_fault + # kf_src = get_kite_fault(profiles) + # surface = kf_src.surface # Build kite fault surface -# surface = KiteSurface.from_profiles(profiles.profiles, 5, 5) + surface = KiteSurface.from_profiles(profiles.profiles, 5, 5) return surface From 0c9fde6cc896e768fb388be2b454c79c4acaba74 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 28 Mar 2024 14:14:02 +0100 Subject: [PATCH 10/29] fix accidental added params --- openquake/mbi/sub/create_2pt5_model.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py index 719712ad0..b97fad696 100755 --- a/openquake/mbi/sub/create_2pt5_model.py +++ b/openquake/mbi/sub/create_2pt5_model.py @@ -33,8 +33,7 @@ from openquake.hazardlib.geo.geodetic import distance -def main(in_path, out_path, *, maximum_sampling_distance=25., - start=None, end=None): +def main(in_path, out_path, *, maximum_sampling_distance=25.) """ From a set of profiles it creates the top surface of the slab """ @@ -52,8 +51,6 @@ def main(in_path, out_path, *, maximum_sampling_distance=25., main.in_path = 'Folder with the profiles' main.out_path = 'Folder where to store the output' main.maximum_sampling_distance = 'Sampling distance [km]' -main.start = 'id of first profile to include' -main.end = 'id of last profile to include' if __name__ == "__main__": main(sys.argv[1:]) From 272d64d16064db2a37e5c474a4516c4e5dab0443 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 28 Mar 2024 14:15:18 +0100 Subject: [PATCH 11/29] typo --- openquake/mbi/sub/create_2pt5_model.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py index b97fad696..d0144b51a 100755 --- a/openquake/mbi/sub/create_2pt5_model.py +++ b/openquake/mbi/sub/create_2pt5_model.py @@ -33,7 +33,7 @@ from openquake.hazardlib.geo.geodetic import distance -def main(in_path, out_path, *, maximum_sampling_distance=25.) 
+def main(in_path, out_path, *, maximum_sampling_distance=25.): """ From a set of profiles it creates the top surface of the slab """ From e863647efb527254c0e8a27f5f3bb986aa47e947 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 28 Mar 2024 14:15:52 +0100 Subject: [PATCH 12/29] typo --- openquake/mbi/sub/create_2pt5_model.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py index d0144b51a..148701ff2 100755 --- a/openquake/mbi/sub/create_2pt5_model.py +++ b/openquake/mbi/sub/create_2pt5_model.py @@ -45,8 +45,7 @@ def main(in_path, out_path, *, maximum_sampling_distance=25.): print(tmps) exit(0) - create_2pt5_model(in_path, out_path, float(maximum_sampling_distance), - start, end) + create_2pt5_model(in_path, out_path, float(maximum_sampling_distance)) main.in_path = 'Folder with the profiles' main.out_path = 'Folder where to store the output' From 4c4aeb3d2fd8c9948a0e4536857eae0a955eb389 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 28 Mar 2024 14:19:00 +0100 Subject: [PATCH 13/29] removing problematic changed params --- openquake/sub/create_2pt5_model.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/openquake/sub/create_2pt5_model.py b/openquake/sub/create_2pt5_model.py index c6a4947be..fe6aeaa5a 100644 --- a/openquake/sub/create_2pt5_model.py +++ b/openquake/sub/create_2pt5_model.py @@ -304,8 +304,7 @@ def write_edges_csv(sps, foldername): numpy.savetxt(fname, numpy.array(dat)) -def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25., - from_id=".*", to_id=".*"): +def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25.): """ :param in_path: Folder name with profiles @@ -324,7 +323,7 @@ def create_2pt5_model(in_path, out_path, maximum_sampling_distance=25., exit(0) # Read profiles - sps, dmin, dmax = read_profiles_csv(in_path, from_id, to_id) + sps, dmin, dmax = read_profiles_csv(in_path) # Compute lengths lengths, longest_key, shortest_key = get_profiles_length(sps) From 0bc25f5bdb6655daa63c9e5effad2e26e0a01fc9 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 12:18:26 +0200 Subject: [PATCH 14/29] making flexible to use kite or complex fault in classification --- openquake/mbi/ccl/classify.py | 7 ++- .../tools/tr/set_subduction_earthquakes.py | 44 +++++++++++-------- 2 files changed, 32 insertions(+), 19 deletions(-) diff --git a/openquake/mbi/ccl/classify.py b/openquake/mbi/ccl/classify.py index 554bbde85..d7e5bf7b8 100644 --- a/openquake/mbi/ccl/classify.py +++ b/openquake/mbi/ccl/classify.py @@ -157,6 +157,11 @@ def classify(ini_fname, compute_distances, rf=''): if 'upp_mag' in config[key]: upp_mag = float(config[key]['upp_mag']) + # specifying surface type + surftype = 'ComplexFault' + if 'surface_type' in config[key]: + surftype = config[key]['surface_type'] + # sse = SetSubductionEarthquakes(trlab, treg_filename, @@ -171,7 +176,7 @@ def classify(ini_fname, compute_distances, rf=''): upp_year, low_mag, upp_mag) - sse.classify(compute_distances, remove_from) + sse.classify(compute_distances, remove_from, surftype) # crustal earthquakes elif re.search('^crustal', key) or re.search('^volcanic', key): diff --git a/openquake/mbt/tools/tr/set_subduction_earthquakes.py b/openquake/mbt/tools/tr/set_subduction_earthquakes.py index a04eda3df..1426524f5 100644 --- a/openquake/mbt/tools/tr/set_subduction_earthquakes.py +++ b/openquake/mbt/tools/tr/set_subduction_earthquakes.py @@ -96,7 +96,7 @@ def 
__init__(self, label, treg_filename, distance_folder, edges_folder, self.low_mag = float(low_mag) self.upp_mag = float(upp_mag) - def classify(self, compute_distances, remove_from): + def classify(self, compute_distances, remove_from, surftype='ComplexFault'): """ :param bool compute_distances: A boolean indicating if distances between earthquakes and the @@ -141,24 +141,32 @@ def classify(self, compute_distances, remove_from): # Build the complex fault surface tedges = _read_edges(edges_folder) print(edges_folder) -# surface = build_complex_surface_from_edges(edges_folder) - surface = build_kite_surface_from_profiles(edges_folder) - mesh = surface.mesh + if surftype == 'ComplexFault': + surface = build_complex_surface_from_edges(edges_folder) + # Create polygon encompassing the mesh + mesh = surface.mesh + plo = list(mesh.lons[0, :]) + pla = list(mesh.lats[0, :]) + # + plo += list(mesh.lons[:, -1]) + pla += list(mesh.lats[:, -1]) + # + plo += list(mesh.lons[-1, ::-1]) + pla += list(mesh.lats[-1, ::-1]) + # + plo += list(mesh.lons[::-1, 0]) + pla += list(mesh.lats[::-1, 0]) + + elif surftype == 'KiteFault': + surface = build_kite_surface_from_profiles(edges_folder) + # Create polygon encompassing the mesh + mesh = surface.mesh + plo = surface.surface_projection[0] + pla = surface.surface_projection[1] + else: + msg = f'surface type {surftype} not supported' + raise ValueError(msg) - # Create polygon encompassing the mesh - #plo = list(mesh.lons[0, :]) - #pla = list(mesh.lats[0, :]) - # - #plo += list(mesh.lons[:, -1]) - #pla += list(mesh.lats[:, -1]) - # - #plo += list(mesh.lons[-1, ::-1]) - #pla += list(mesh.lats[-1, ::-1]) - # - #plo += list(mesh.lons[::-1, 0]) - #pla += list(mesh.lats[::-1, 0]) - plo = surface.surface_projection[0] - pla = surface.surface_projection[1] # Set variables used in griddata data = np.array([mesh.lons.flatten().T, mesh.lats.flatten().T]).T From adc352c7b8eadae5e3af9bbde3e86289910fdd9e Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 13:17:38 +0200 Subject: [PATCH 15/29] fixing tests by adding option to tell sub that catalogue must be sorted --- openquake/mbi/sub/create_2pt5_model.py | 1 - openquake/sub/slab/rupture.py | 34 +++++++++++++------ openquake/sub/slab/rupture_utils.py | 4 ++- .../sub/tests/slab/rupture_smooth_test.py | 1 + openquake/sub/utils.py | 5 --- 5 files changed, 27 insertions(+), 18 deletions(-) diff --git a/openquake/mbi/sub/create_2pt5_model.py b/openquake/mbi/sub/create_2pt5_model.py index 148701ff2..f16fbae74 100755 --- a/openquake/mbi/sub/create_2pt5_model.py +++ b/openquake/mbi/sub/create_2pt5_model.py @@ -32,7 +32,6 @@ from openquake.sub.create_2pt5_model import create_2pt5_model from openquake.hazardlib.geo.geodetic import distance - def main(in_path, out_path, *, maximum_sampling_distance=25.): """ From a set of profiles it creates the top surface of the slab diff --git a/openquake/sub/slab/rupture.py b/openquake/sub/slab/rupture.py index 67aa1234f..34e561a36 100755 --- a/openquake/sub/slab/rupture.py +++ b/openquake/sub/slab/rupture.py @@ -16,10 +16,10 @@ import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D -# from mayavi import mlab +#from mayavi import mlab from pyproj import Proj -# from openquake.sub.plotting.tools import plot_mesh -# from openquake.sub.plotting.tools import plot_mesh_mayavi +##from openquake.sub.plotting.tools import plot_mesh +#from openquake.sub.plotting.tools import plot_mesh_mayavi from openquake.sub.misc.edge import create_from_profiles from openquake.sub.quad.msh 
import create_lower_surface_mesh @@ -41,10 +41,12 @@ from openquake.man.checks.catalogue import load_catalogue from openquake.wkf.utils import create_folder +#PLOTTING = True PLOTTING = False -def get_catalogue(cat_pickle_fname, treg_filename=None, label=''): +def get_catalogue(cat_pickle_fname, treg_filename=None, label='', + sort_cat=False): """ :param cat_pickle_fname: :param treg_filename: @@ -58,9 +60,9 @@ f.close() # # loading the catalogue - # catalogue = pickle.load(open(cat_pickle_fname, 'rb')) catalogue = load_catalogue(cat_pickle_fname) - catalogue.sort_catalogue_chronologically() + if sort_cat: + catalogue.sort_catalogue_chronologically() # # if a label and a TR are provided we filter the catalogue if treg_filename is not None: @@ -537,6 +539,10 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None, # Catalogue cat_pickle_fname = config.get('main', 'catalogue_pickle_fname') cat_pickle_fname = os.path.abspath(os.path.join(ref_fdr, cat_pickle_fname)) + try: + # getboolean parses 'True'/'False' strings; bool() on a non-empty string is always True + sort_cat = config.getboolean('main', 'sort_catalogue') + except Exception: + sort_cat = False # Output hdf5_filename = config.get('main', 'out_hdf5_fname') @@ -586,12 +592,14 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None, logging.info('Creating ruptures on virtual faults') print('Creating ruptures on virtual faults') ohs = create_inslab_meshes(msh, dips, slab_thickness, sampling) - if only_plt: - pass + #if only_plt: + # pass + if False: # TODO consider replacing with pyvista - """ + azim = 10. elev = 20. dist = 20. @@ -625,7 +633,7 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None, mlab.show() exit(0) - """ + if PLOTTING: vsc = 0.01 @@ -671,6 +679,9 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None, vspa) # mlo, mla, mde = msh3d.select_nodes_within_two_meshesa(omsh, olmsh) mlo, mla, mde = msh3d.get_coordinates_vectors() + if False: + df = pd.DataFrame({'mlo': mlo, 'mla': mla, 'mde': mde}) + df.to_csv('mesh_coords.csv') # save data on hdf5 file if os.path.exists(hdf5_filename): @@ -687,7 +698,8 @@ def calculate_ruptures(ini_fname, only_plt=False, ref_fdr=None, agr=None, fh5.close() # Get catalogue - catalogue = get_catalogue(cat_pickle_fname, treg_filename, label) + catalogue = get_catalogue(cat_pickle_fname, treg_filename, label, + sort_cat) # smoothing values, smooth = smoothing(mlo, mla, mde, catalogue, hspa, vspa, diff --git a/openquake/sub/slab/rupture_utils.py b/openquake/sub/slab/rupture_utils.py index 69da3a912..67af58c6b 100644 --- a/openquake/sub/slab/rupture_utils.py +++ b/openquake/sub/slab/rupture_utils.py @@ -196,5 +196,7 @@ def get_discrete_dimensions(area, sampling, aspr): wdt = None lng = None elif area_error > 0.25 and lng > 1e-10 and wdt > 1e-10: - raise ValueError('Area discrepancy: ', area, lng*wdt, lng, wdt, aspr) + print('Area discrepancy: ', area, lng*wdt, lng, wdt, aspr) + + #raise ValueError('Area discrepancy: ', area, lng*wdt, lng, wdt, aspr) return lng, wdt diff --git a/openquake/sub/tests/slab/rupture_smooth_test.py b/openquake/sub/tests/slab/rupture_smooth_test.py index c958cdc1e..e1e4553a2 100644 --- a/openquake/sub/tests/slab/rupture_smooth_test.py +++ b/openquake/sub/tests/slab/rupture_smooth_test.py @@ -50,6 +50,7 @@ def setUp(self): config['main']['profile_folder'] = self.out_path # Spatial distribution controlled by smoothing config['main']['uniform_fraction'] = '0.0' + config['main']['sort_catalogue'] = 
'True' # Save the new .ini self.ini = os.path.join(self.out_path, 'test.ini') diff --git a/openquake/sub/utils.py b/openquake/sub/utils.py index 3667f8dcc..28a07c709 100644 --- a/openquake/sub/utils.py +++ b/openquake/sub/utils.py @@ -367,11 +367,6 @@ def build_kite_surface_from_profiles(foldername): # Read edges profiles = ProfileSet.from_files(foldername) - # Kite fault source - #from openquake.sub.profiles import get_kite_fault - # kf_src = get_kite_fault(profiles) - # surface = kf_src.surface - # Build kite fault surface surface = KiteSurface.from_profiles(profiles.profiles, 5, 5) From 50ef862ae670ff2cc34e9ad24ec254c9aa11d24f Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 13:30:36 +0200 Subject: [PATCH 16/29] adding test data --- openquake/sub/tests/data/cs2/cs_3.csv | 5 +++++ openquake/sub/tests/data/cs2/cs_4.csv | 5 +++++ 2 files changed, 10 insertions(+) create mode 100644 openquake/sub/tests/data/cs2/cs_3.csv create mode 100644 openquake/sub/tests/data/cs2/cs_4.csv diff --git a/openquake/sub/tests/data/cs2/cs_3.csv b/openquake/sub/tests/data/cs2/cs_3.csv new file mode 100644 index 000000000..fc2bf8b28 --- /dev/null +++ b/openquake/sub/tests/data/cs2/cs_3.csv @@ -0,0 +1,5 @@ +10.0 45.0 1.0 +10.2 45.2 3.0 +10.3 45.3 7.0 +10.5 45.5 19.0 +10.7 45.7 40.0 diff --git a/openquake/sub/tests/data/cs2/cs_4.csv b/openquake/sub/tests/data/cs2/cs_4.csv new file mode 100644 index 000000000..5e598bd9d --- /dev/null +++ b/openquake/sub/tests/data/cs2/cs_4.csv @@ -0,0 +1,5 @@ +10.1 45.0 1.6 +10.3 45.2 2.0 +10.4 45.3 5.0 +10.6 45.5 9.0 +10.8 45.7 35.0 From 1f9248ea05e6c78d515b3e03e3593ca404c3609f Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 15:01:49 +0200 Subject: [PATCH 17/29] adding test for sampling the magnitudes and making catalogues --- .../tests/tools/mfd_sample/data/catalogue.csv | 6 ++++ .../tests/tools/mfd_sample/data/test02.hdf5 | Bin 0 -> 5168 bytes .../mfd_sample/expected/v0_catalogue.csv | 6 ++++ .../mfd_sample/expected/v1_catalogue.csv | 6 ++++ .../tools/mfd_sample/expected/v_mags.csv | 5 +++ .../tests/tools/mfd_sample/mfd_sample_test.py | 29 ++++++++++++++++++ 6 files changed, 52 insertions(+) create mode 100644 openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv create mode 100644 openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5 create mode 100644 openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv create mode 100644 openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv create mode 100644 openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv create mode 100644 openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py diff --git a/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv new file mode 100644 index 000000000..d566cd6c3 --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/data/catalogue.csv @@ -0,0 +1,6 @@ +eventID,Identifier,year,month,day,hour,minute,second,latitude,longitude,depth,Agency,magnitude,sigmaMagnitude,note +EV0001,A,1900,1,1,0,0,0,45.1,10.1,5,AA,6.2,0.3,interface +EV0002,A,1900,1,1,0,0,0,45.1,10.1,50,AA,6.2,0.5,below interface +EV0003,A,1900,1,1,0,0,0,45.4,10.3,10,AA,6.2,0.1,crustal +EV0004,A,1900,1,1,0,0,0,45.5,10.5,31,AA,6.2,0.22,sub_crustal +EV0005,A,1900,1,1,0,0,0,45.6,10.6,20,AA,6.2,1.0,crustal diff --git a/openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5 b/openquake/mbt/tests/tools/mfd_sample/data/test02.hdf5 new file mode 100644 index 
0000000000000000000000000000000000000000..96e8580713a36bef76af15cf4ec25c464f5a46c0 GIT binary patch literal 5168 zcmeD5aB<`1lHy_j0S*oZ76t(@6Gr@p0s|3<2#gPtPk=HQp>zk7Ucm%mFfxE31A_!q zTo7tLy1I}cS62q0N|^aD8mf)KfCa)rbsbE0lpgLO;Nj{R0PoX6q(XoLLIX1wgPWsIFffrbgan0xD6j-8P#mO=iHQkpAP3ZZW<~{Wuo|HKsGtIs zOJ`oNpT7$u0~65I5TL+}-vO}vsvwMC-Kgwn2#kinXb6mkz-R~zt`GpWUm;cf;BwZe fYeqw0Gz3ONU^E0qLtr!nXchtk*Z)@?l>L7I157## literal 0 HcmV?d00001 diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv new file mode 100644 index 000000000..85f3ad958 --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/expected/v0_catalogue.csv @@ -0,0 +1,6 @@ +eventID,Agency,year,month,day,hour,minute,second,timeError,longitude,latitude,SemiMajor90,SemiMinor90,ErrorStrike,depth,depthError,magnitude,sigmaMagnitude,magnitudeType +EV0001,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,5.0,5.0,6.012359563267861,0.3,0.3 +EV0002,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,50.0,50.0,5.887265938779767,0.5,0.5 +EV0003,AA,1900,1,1,0,0,0.0,0.0,10.3,45.4,45.4,45.4,45.4,10.0,10.0,6.137453187755954,0.1,0.1 +EV0004,AA,1900,1,1,0,0,0.0,0.0,10.5,45.5,45.5,45.5,45.5,31.0,31.0,6.062397013063098,0.22,0.22 +EV0005,AA,1900,1,1,0,0,0.0,0.0,10.6,45.6,45.6,45.6,45.6,20.0,20.0,5.574531877559534,1.0,1.0 diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv new file mode 100644 index 000000000..c361cf742 --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/expected/v1_catalogue.csv @@ -0,0 +1,6 @@ +eventID,Agency,year,month,day,hour,minute,second,timeError,longitude,latitude,SemiMajor90,SemiMinor90,ErrorStrike,depth,depthError,magnitude,sigmaMagnitude,magnitudeType +EV0001,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,5.0,5.0,6.305972746393061,0.3,0.3 +EV0002,AA,1900,1,1,0,0,0.0,0.0,10.1,45.1,45.1,45.1,45.1,50.0,50.0,6.376621243988434,0.5,0.5 +EV0003,AA,1900,1,1,0,0,0.0,0.0,10.3,45.4,45.4,45.4,45.4,10.0,10.0,6.235324248797687,0.1,0.1 +EV0004,AA,1900,1,1,0,0,0.0,0.0,10.5,45.5,45.5,45.5,45.5,31.0,31.0,6.277713347354911,0.22,0.22 +EV0005,AA,1900,1,1,0,0,0.0,0.0,10.6,45.6,45.6,45.6,45.6,20.0,20.0,6.553242487976869,1.0,1.0 diff --git a/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv b/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv new file mode 100644 index 000000000..d9e27f3f7 --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/expected/v_mags.csv @@ -0,0 +1,5 @@ +6.01,6.31 +5.89,6.38 +6.14,6.24 +6.06,6.28 +5.57,6.55 diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py new file mode 100644 index 000000000..07b822603 --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py @@ -0,0 +1,29 @@ +import os +import unittest +import tempfile +import numpy as np +import pandas as pd + +from openquake.mbt.tools.mfd_sample.make_mfds import _create_catalogue_versions + +BASE_PATH = os.path.dirname(__file__) + +class TestWorkflow(unittest.TestCase): + + def setUp(self): + self.catfi = os.path.join(BASE_PATH, 'data', 'catalogue.csv') + # Draw a name for the temporary output folder (the tool creates it itself) + self.tmpd = next(tempfile._get_candidate_names()) + + def test_generate_cats(self): + """ + Test generation of sampled catalogues: with a fixed random seed, the sampled magnitudes must reproduce the expected values + """ + _create_catalogue_versions(self.catfi, self.tmpd, 2, stype='random', + numstd=1, rseed=122) + + mags_exp_fi = os.path.join(BASE_PATH, 'expected', 
'v_mags.csv') + mags_out_fi = os.path.join(self.tmpd, 'v_mags.csv') + expected = pd.read_csv(mags_exp_fi) + output = pd.read_csv(mags_out_fi) + assert expected.equals(output) From e8c19b498529254c0e8a27f5f3bb986aa47e947 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 15:16:58 +0200 Subject: [PATCH 18/29] changing some organisation --- openquake/mbi/cat/MFDs_sample_mag_sigma.py | 295 +++----------------- openquake/mbt/tools/mfd_sample/make_mfds.py | 274 ++++++++++++++++++ 2 files changed, 309 insertions(+), 260 deletions(-) create mode 100644 openquake/mbt/tools/mfd_sample/make_mfds.py diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py index 845856148..2184e85d5 100644 --- a/openquake/mbi/cat/MFDs_sample_mag_sigma.py +++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py @@ -1,273 +1,48 @@ -#!/usr/bin/env python +# ------------------- The OpenQuake Model Building Toolkit -------------------- +# Copyright (C) 2022 GEM Foundation +# _______ _______ __ __ _______ _______ ___ _ +# | || | | |_| || _ || || | | | +# | _ || _ | ____ | || |_| ||_ _|| |_| | +# | | | || | | ||____|| || | | | | _| +# | |_| || |_| | | || _ | | | | |_ +# | || | | ||_|| || |_| | | | | _ | +# |_______||____||_| |_| |_||_______| |___| |___| |_| +# +# This program is free software: you can redistribute it and/or modify it under +# the terms of the GNU Affero General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) any +# later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS +# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more +# details. +# +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# ----------------------------------------------------------------------------- +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +# coding: utf-8 -import os -import re -import sys -import numpy as np -import pandas as pd -import pickle -import glob -import time import toml - +import numpy as np from openquake.baselib import sap -from scipy.stats import truncnorm -from copy import deepcopy - -from openquake.hmtk.parsers.catalogue import CsvCatalogueParser -from openquake.mbi.ccl.decluster_multiple_TR import main as decluster_multiple_TR -from openquake.cat.completeness.generate import get_completenesses -from openquake.cat.completeness.analysis import (_completeness_analysis, - read_compl_params, - read_compl_data) -from openquake.cat.completeness.mfd_eval_plots import make_all_plots +from openquake.mbt.mfds_sample.make_mfds import make_many_mfds -def _get_truncated_normal(mean=0, sd=1, low=0, upp=10): - return truncnorm( - (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd) -def _write_cat_instance(catalogue, format, fileout, cnum): - if format == 'hmtk': - catalogue.write_catalogue(fileout.format(cnum)) - elif format == 'pickle': - with open(fileout.format(cnum), 'wb') as f: - pickle.dump(catalogue, f) -def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random'): - """ - catfi: catalogue filename (pkl or hmtk/csv) - stype: - random - samples from truncnorm - even - same sample for every magnitude; -1 : 1 by 0.2 - """ - # check if output folder is empty - - if os.path.exists(outdir): - tmps = f'\nError: {outdir} already exists! 
' - tmps += '\n Choose an empty directory.' - print(tmps) - sys.exit(1) - else: - os.makedirs(outdir) - - csvout = os.path.join(outdir, 'v{}_'+catfi.split('/')[-1]) - fileout = os.path.join(outdir, 'v_mags.csv') - factors = np.arange(-1,1,0.1) - - # read catalogue - if 'pkl' in catfi: - format = 'pickle' - catalogue = pd.read_pickle(catfi) - - elif ('csv' in catfi) or ('hmtk' in catfi): - parser = CsvCatalogueParser(catfi) # From .csv to hmtk - catalogue = parser.read_file() - format = 'hmtk' - - else: - print(sys.stderr, "Use a supported catalogue format.") - sys.exit(1) - - data = catalogue.data - - if numcats and stype == 'even': - print(sys.stderr, \ - "Cannot specify number of catalogues for even sampling.") - sys.exit(1) - - if stype == 'random': - - mags = [] - - time_strt = time.time() - for ii in np.arange(0,len(data['magnitude'])): - m=data['magnitude'][ii] - sd=data['sigmaMagnitude'][ii] - allm = _get_truncated_normal(mean=m, sd=sd, low=m-sd, upp=m+sd) - mags_perm = allm.rvs(size=numcats) - mags.append(mags_perm) - - time_end = time.time() - print('Run time for generating magnitudes: ') - print(time_end - time_strt) - - marr = np.array(mags) - np.savetxt(fileout, marr, delimiter=',', fmt='%.2f') - - for ii, ms in enumerate(marr.T): - time_strt = time.time() - catalogue = deepcopy(catalogue) - catalogue.data['magnitude'] = np.array(ms) - _write_cat_instance(catalogue, format, csvout, ii) - time_end = time.time() - print('Run time for writing catalogue: ') - print(time_end - time_strt) - - - elif stype == 'even': - full_mags = {} - for jj, f in enumerate(factors): - mags = [f * ms + m for m, ms in zip(data['magnitude'], data['sigmaMagnitude'])] - catalogue = deepcopy(catalogue) - catalogue.data['magnitude'] = np.array(mags) - _write_cat_instance(catalogue, format, fileout, jj) - full_mags[jj] = mags - pd.DataFrame(full_mags).to_csv(fileout, index=None) - - else: - print(sys.stderr, "Use a supported sampling type.") - sys.exit(1) - - - -def _decl_all_cats(outdir, cat, dcl_toml_tmp, decdir): - - """ - """ - catvs = glob.glob(os.path.join(outdir, '*pkl')) - - config = toml.load(dcl_toml_tmp) - config['main']['save_aftershocks'] = False - for cat in catvs: - config['main']['catalogue'] = cat - config['main']['output'] = decdir - tmpfi = 'tmp-config-dcl.toml' - with open(tmpfi, 'w') as f: - f.write(toml.dumps(config)) - - decluster_multiple_TR(tmpfi) - - labels = [] - for key in config: - if re.search('^case', key): - labels.extend(config[key]['regions']) - - - -def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi): + Runs the workflow that samples the magnitude sigma for each event, creating + many catalogues, then declusters them all based on their TRT, then runs + completeness analysis that jointly solves for the MFD, and finally returns + three mfds based on the center and 1-sigma of the a-b covariance """ - """ - config = toml.load(compl_toml) - - cref = config['completeness'].get('completeness_ref', None) - - mmin_compl = config['completeness']['min_mag_compl'] - - if cref == None: - mags = np.arange(4, 8.0, 0.1) - years = np.arange(1900, 2020, 5.0) - else: - mrange = config['completeness'].get('deviation', 1.0) - mags_cref = [c[1] for c in cref] - mags_min = min(mags_cref) - mrange - mags_max = max(mags_cref) + mrange - mags_all = np.arange(mags_min, mags_max, 0.2) - mags = list(set([round(m,2) for m in mags_all if m >= mmin_compl ])) - mags.sort() - print(mags) - - years = [c[0] for c in cref] - years.sort() - - config['completeness']['mags'] = mags - 
config['completeness']['years'] = years - - with open(tmpfi, 'w') as f: - f.write(toml.dumps(config)) - - get_completenesses(tmpfi, compdir) - - -def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs): - """ - """ - # load configs - config = toml.load(compl_toml) - - ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config) - compl_tables, mags_chk, years_chk = read_compl_data(compdir) - - # Fixing sorting of years - if np.all(np.diff(yrs)) >= 0: - yrs = np.flipud(yrs) - - for lab in labels: - dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}.csv')) - fout_lab = os.path.join(fout, lab) - fout_figs_lab = os.path.join(fout_figs, lab, 'mfds') - for ii, cat in enumerate(dec_catvs): - try: - res = _completeness_analysis(cat, yrs, ms, bw, r_m, - r_up_m, [bmin, bmax], crit, - compl_tables, f'{ii}', - folder_out_figs=fout_figs_lab, - folder_out=fout_lab, - rewrite=False) - except: - print(f'Impossible for catalogue {ii}') - - -def main(configfile): - """ - """ - - config = toml.load(configfile) - - # read basic inputs - catfi = config['main']['catalogue_filename'] - outdir = config['main']['output_directory'] - labs = config['mfds'].get('labels', None) - - - # make subdirs based on outdir name - catdir = os.path.join(outdir, 'catalogues') - decdir = os.path.join(outdir, 'declustered') - compdir = os.path.join(outdir, 'completeness') - resdir = os.path.join(outdir, 'results') - figdir = os.path.join(outdir, 'figures') - tmpconf = os.path.join(outdir, 'tmp-config-compl.toml') - - - # if create_cats, make the sampled catalogues - create_cats = config['catalogues'].get('create_catalogues', True) - if create_cats: - ncats = int(config['catalogues'].get('number', 1000)) - stype = config['catalogues'].get('sampling_type', 'random') - _create_catalogue_versions(catfi, catdir, numcats=ncats, stype=stype) - - # perform all the declustering possibilities - links TRTs following configs - decluster = config['decluster'].get('decluster_catalogues', True) - if decluster: - dcl_toml_tmpl = config['decluster']['decluster_settings'] - _decl_all_cats(catdir, catfi, dcl_toml_tmpl, decdir) - - - # generate the completeness tables - generate = config['completeness'].get('generate_completeness', True) - if generate: - compl_toml = config['completeness']['completeness_settings'] - _gen_comple(compl_toml, decdir, compdir, tmpconf) - - # create the mfds for the given TRT labels - mfds = config['mfds'].get('create_mfds', True) - if mfds: - if not labs: - print('Must specify the TRTs') - sys.exit() - - _compl_analysis(decdir, compdir, tmpconf, labs, resdir, figdir) - - plots = config['plot'].get('make_plots', True) - if plots: - if not labs: - print('Must specify the TRTs') - sys.exit() - make_all_plots(resdir, compdir, figdir, labs) + make_many_mfds(fname_config) -main.configfile = 'path to configuration file' +msg = 'Name of the .toml file with configuration parameters' +main.fname_config = msg if __name__ == '__main__': - freeze_support() sap.run(main) diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py new file mode 100644 index 000000000..9940ae935 --- /dev/null +++ b/openquake/mbt/tools/mfd_sample/make_mfds.py @@ -0,0 +1,274 @@ +#!/usr/bin/env python + +import os +import re +import sys +import numpy as np +import pandas as pd +import pickle +import glob +import time +import toml + +from openquake.baselib import sap +from scipy.stats import truncnorm +from copy import deepcopy + +from openquake.hmtk.parsers.catalogue import 
CsvCatalogueParser +from openquake.mbi.ccl.decluster_multiple_TR import main as decluster_multiple_TR +from openquake.cat.completeness.generate import get_completenesses +from openquake.cat.completeness.analysis import (_completeness_analysis, + read_compl_params, + read_compl_data) +from openquake.cat.completeness.mfd_eval_plots import make_all_plots + + +def _get_truncated_normal(mean=0, sd=1, low=0, upp=10): + return truncnorm( + (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd) + +def _write_cat_instance(catalogue, format, fileout, cnum): + if format == 'hmtk': + catalogue.write_catalogue(fileout.format(cnum)) + elif format == 'pickle': + with open(fileout.format(cnum), 'wb') as f: + pickle.dump(catalogue, f) + +def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random', + numstd=1, rseed=122): + """ + catfi: catalogue filename (pkl or hmtk/csv) + outdir: where to put the catalogues + numcats: number of catalogues to create, if stype is random + stype: + random - samples from truncnorm + even - same sample for every magnitude; -1 : 1 by 0.1 + rseed: random seed to control the cat generation + """ + # check if output folder is empty + + if os.path.exists(outdir): + tmps = f'\nError: {outdir} already exists! ' + tmps += '\n Choose an empty directory.' + print(tmps) + sys.exit(1) + else: + os.makedirs(outdir) + + csvout = os.path.join(outdir, 'v{}_' + catfi.split('/')[-1]) + fileout = os.path.join(outdir, 'v_mags.csv') + factors = np.arange(-1, 1, 0.1) + + # read catalogue + if 'pkl' in catfi: + format = 'pickle' + catalogue = pd.read_pickle(catfi) + + elif ('csv' in catfi) or ('hmtk' in catfi): + parser = CsvCatalogueParser(catfi) # From .csv to hmtk + catalogue = parser.read_file() + format = 'hmtk' + + else: + print("Use a supported catalogue format.", file=sys.stderr) + sys.exit(1) + + data = catalogue.data + + if numcats and stype == 'even': + print("Cannot specify number of catalogues for even sampling.", + file=sys.stderr) + sys.exit(1) + + if stype == 'random': + + mags = [] + + time_strt = time.time() + for ii in range(len(data['magnitude'])): + m = data['magnitude'][ii] + sd = data['sigmaMagnitude'][ii] + allm = _get_truncated_normal(mean=m, sd=sd, + low=m-numstd*sd, upp=m+numstd*sd) + mags_perm = allm.rvs(size=numcats, random_state=rseed) + mags.append(mags_perm) + + marr = np.array(mags) + np.savetxt(fileout, marr, delimiter=',', fmt='%.2f') + + for ii, ms in enumerate(marr.T): + time_strt = time.time() + catalogue = deepcopy(catalogue) + catalogue.data['magnitude'] = np.array(ms) + _write_cat_instance(catalogue, format, csvout, ii) + time_end = time.time() + + + elif stype == 'even': + full_mags = {} + for jj, f in enumerate(factors): + mags = [f * ms + m for m, ms in zip(data['magnitude'], data['sigmaMagnitude'])] + catalogue = deepcopy(catalogue) + catalogue.data['magnitude'] = np.array(mags) + _write_cat_instance(catalogue, format, fileout, jj) + full_mags[jj] = mags + pd.DataFrame(full_mags).to_csv(fileout, index=None) + + else: + print("Use a supported sampling type.", file=sys.stderr) + sys.exit(1) + + + +def _decl_all_cats(outdir, cat, dcl_toml_tmp, decdir): + """ + Decluster every sampled catalogue using the settings in the dcl_toml_tmp template + """ + catvs = glob.glob(os.path.join(outdir, '*pkl')) + + config = toml.load(dcl_toml_tmp) + config['main']['save_aftershocks'] = False + for cat in catvs: + config['main']['catalogue'] = cat + config['main']['output'] = decdir + tmpfi = 'tmp-config-dcl.toml' + with open(tmpfi, 'w') as f: + f.write(toml.dumps(config)) + + decluster_multiple_TR(tmpfi) + + labels = [] + for key in config: + if 
re.search('^case', key): + labels.extend(config[key]['regions']) + + + +def _gen_comple(compl_toml, dec_outdir, compdir, tmpfi): + """ + Build the completeness windows from the reference completeness and generate the tables + """ + config = toml.load(compl_toml) + + cref = config['completeness'].get('completeness_ref', None) + + mmin_compl = config['completeness']['min_mag_compl'] + + if cref is None: + mags = np.arange(4, 8.0, 0.1) + years = np.arange(1900, 2020, 5.0) + else: + mrange = config['completeness'].get('deviation', 1.0) + mags_cref = [c[1] for c in cref] + mags_min = min(mags_cref) - mrange + mags_max = max(mags_cref) + mrange + mags_all = np.arange(mags_min, mags_max, 0.2) + mags = list(set([round(m, 2) for m in mags_all if m >= mmin_compl])) + mags.sort() + print(mags) + + years = [c[0] for c in cref] + years.sort() + + config['completeness']['mags'] = mags + config['completeness']['years'] = years + + with open(tmpfi, 'w') as f: + f.write(toml.dumps(config)) + + get_completenesses(tmpfi, compdir) + + +def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs): + """ + Run the completeness/MFD analysis for each declustered catalogue + """ + # load configs + config = toml.load(compl_toml) + + ms, yrs, bw, r_m, r_up_m, bmin, bmax, crit = read_compl_params(config) + compl_tables, mags_chk, years_chk = read_compl_data(compdir) + + # Fixing sorting of years + if np.all(np.diff(yrs) >= 0): + yrs = np.flipud(yrs) + + for lab in labels: + dec_catvs = glob.glob(os.path.join(decdir, f'*{lab}.csv')) + fout_lab = os.path.join(fout, lab) + fout_figs_lab = os.path.join(fout_figs, lab, 'mfds') + for ii, cat in enumerate(dec_catvs): + try: + res = _completeness_analysis(cat, yrs, ms, bw, r_m, + r_up_m, [bmin, bmax], crit, + compl_tables, f'{ii}', + folder_out_figs=fout_figs_lab, + folder_out=fout_lab, + rewrite=False) + except Exception: + print(f'Impossible for catalogue {ii}') + + +def make_many_mfds(configfile): + """ + Run the full sampled-MFD workflow following the given configuration file + """ + + config = toml.load(configfile) + + # read basic inputs + catfi = config['main']['catalogue_filename'] + outdir = config['main']['output_directory'] + labs = config['mfds'].get('labels', None) + + + # make subdirs based on outdir name + catdir = os.path.join(outdir, 'catalogues') + decdir = os.path.join(outdir, 'declustered') + compdir = os.path.join(outdir, 'completeness') + resdir = os.path.join(outdir, 'results') + figdir = os.path.join(outdir, 'figures') + tmpconf = os.path.join(outdir, 'tmp-config-compl.toml') + + + # if create_cats, make the sampled catalogues + create_cats = config['catalogues'].get('create_catalogues', True) + if create_cats: + ncats = int(config['catalogues'].get('number', 1000)) + stype = config['catalogues'].get('sampling_type', 'random') + nstd = config['catalogues'].get('num_std_devs', 1) + rseed = config['catalogues'].get('random_seed', 122) + _create_catalogue_versions(catfi, catdir, numcats=ncats, stype=stype, + numstd=nstd, rseed=rseed) + + # perform all the declustering possibilities - links TRTs following configs + decluster = config['decluster'].get('decluster_catalogues', True) + if decluster: + dcl_toml_tmpl = config['decluster']['decluster_settings'] + _decl_all_cats(catdir, catfi, dcl_toml_tmpl, decdir) + + + # generate the completeness tables + generate = config['completeness'].get('generate_completeness', True) + if generate: + compl_toml = config['completeness']['completeness_settings'] + _gen_comple(compl_toml, decdir, compdir, tmpconf) + + # create the mfds for the given TRT labels + mfds = config['mfds'].get('create_mfds', True) + if mfds: + if not labs: + print('Must specify the TRTs') + sys.exit() + + _compl_analysis(decdir, compdir, tmpconf, labs, resdir, figdir) + 
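+ # (For reference: the step toggles read above and below are [catalogues] create_catalogues, [decluster] decluster_catalogues, [completeness] generate_completeness, [mfds] create_mfds and [plot] make_plots; each defaults to True when missing.)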
+ plots = config['plot'].get('make_plots', True) + if plots: + if not labs: + print('Must specify the TRTs') + sys.exit() + make_all_plots(resdir, compdir, figdir, labs) + + +make_many_mfds.configfile = 'path to configuration file' + +if __name__ == '__main__': + sap.run(make_many_mfds) From 27ca787c15d92c2df8ce8670e17f3477d1ac00c3 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Wed, 3 Apr 2024 17:14:07 +0200 Subject: [PATCH 19/29] adding test for full mfd workflow to ensure with same random seed results are the same --- openquake/cat/completeness/mfd_eval_plots.py | 43 ++++++++++++++---- openquake/mbi/cat/MFDs_sample_mag_sigma.py | 2 +- .../tests/tools/mfd_sample/config/compl.toml | 21 +++++++++ .../tests/tools/mfd_sample/config/dec.toml | 16 +++++++ .../tests/tools/mfd_sample/config/test.toml | 23 ++++++++++ .../mfd_sample/data2/classified_test.hdf5 | Bin 0 -> 7117 bytes .../tests/tools/mfd_sample/data2/sub_cat.pkl | Bin 0 -> 20266 bytes .../mfd_sample/expected2/mfd-results.csv | 7 +++ .../tests/tools/mfd_sample/mfd_sample_test.py | 23 +++++++++- 9 files changed, 123 insertions(+), 12 deletions(-) create mode 100644 openquake/mbt/tests/tools/mfd_sample/config/compl.toml create mode 100644 openquake/mbt/tests/tools/mfd_sample/config/dec.toml create mode 100644 openquake/mbt/tests/tools/mfd_sample/config/test.toml create mode 100644 openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5 create mode 100644 openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl create mode 100644 openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv diff --git a/openquake/cat/completeness/mfd_eval_plots.py b/openquake/cat/completeness/mfd_eval_plots.py index 6681d4440..a037dd754 100644 --- a/openquake/cat/completeness/mfd_eval_plots.py +++ b/openquake/cat/completeness/mfd_eval_plots.py @@ -69,9 +69,15 @@ def _make_a_b_histos(df_all, df_best, figsdir): ax[1].tick_params(axis='y', labelcolor=color) for ii, idx in enumerate(set(df_all.idx)): + df_sub = df_all[df_all.idx == idx] - ax[0].hist(df_sub.agr, bins=binsA, color='gray', alpha=10/num_cats) - ax[1].hist(df_sub.bgr, bins=binsB, color='gray', alpha=10/num_cats) + if num_cats < 10: + alpha = 0.1 + else: + alpha = 10/num_cats + + ax[0].hist(df_sub.agr, bins=binsA, color='gray', alpha=alpha) + ax[1].hist(df_sub.bgr, bins=binsB, color='gray', alpha=alpha) ax2a = ax[0].twinx() ax2b = ax[1].twinx() ax2a.hist(df_best.agr, bins=binsA, color='red', alpha=0.2) @@ -159,13 +165,18 @@ def plot_best_mfds(df_best, figsdir): linewidth=0.1, zorder=0, label='Cumulative MFD') else: + if num <= 10: + alpha1 = 0.1 + else: + alpha1 = 10/num + plt.scatter(row.mags, row.rates, marker='_', color='r', - alpha=10/num) + alpha=alpha1) plt.scatter(row.mags, row.cm_rates, marker='.', color='b', - alpha=10/num) - plt.semilogy(mfd_m, mfd_r, color='r', alpha=30/num, linewidth=0.1, + alpha=alpha1) + plt.semilogy(mfd_m, mfd_r, color='r', alpha=3*alpha1, linewidth=0.1, zorder=0) - plt.semilogy(mfd_m, mfd_cr, color='b', alpha=50/num, + plt.semilogy(mfd_m, mfd_cr, color='b', alpha=5*alpha1, linewidth=0.1, zorder=0) plt.xlabel('Magnitude') @@ -273,6 +284,8 @@ def plot_weighted_covariance_ellipse(df, figdir, n_std=1.0, plt.savefig(fout, dpi=300) plt.close() + return center_x, center_y, major_x1, major_y1, major_x2, major_y2 + def plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=10, bw=0.2, figname=None): @@ -314,11 +327,16 @@ def plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=10, bw=0.2, fign mgrts = mfd.get_annual_occurrence_rates() mfd_m = [m[0] for 
m in mgrts] mfd_r = [m[1] for m in mgrts] + if len(df_best) <=30: + alpha = 0.1 + else: + alpha=30/len(df_best) + if field == 'cm_rates': mfd_cr = [sum(mfd_r[ii:]) for ii in range(len(mfd_r))] - plt.semilogy(mfd_m, mfd_cr, color='maroon', linewidth=0.2, zorder=10, alpha=30/len(df_best)) + plt.semilogy(mfd_m, mfd_cr, color='maroon', linewidth=0.2, zorder=10, alpha=alpha) else: - plt.semilogy(mfd_m, mfd_r, color='maroon', linewidth=0.2, zorder=10, alpha=30/len(df_best)) + plt.semilogy(mfd_m, mfd_r, color='maroon', linewidth=0.2, zorder=10, alpha=alpha) if figname==None: figname = f'mfds_all_{field}.png' @@ -334,6 +352,7 @@ def plot_all_mfds(df_all, df_best, figsdir, field='rates', bins=10, bw=0.2, fign def make_all_plots(resdir_base, compdir, figsdir_base, labels): + agrs, bgrs, labs = [], [], [] for label in labels: print(f'Running for {label}') resdir = os.path.join(resdir_base, label) @@ -368,5 +387,11 @@ def make_all_plots(resdir_base, compdir, figsdir_base, labels): plot_all_mfds(df_all, df_thresh, figsdir, field='cm_rates', bins=60, figname='mfds_thresh_cmrates.png') print('plotting covariance') - plot_weighted_covariance_ellipse(df_best, figsdir) + cx, cy, mx1, my1, mx2, my2 = plot_weighted_covariance_ellipse(df_best, figsdir) plot_weighted_covariance_ellipse(df_thresh, figsdir, figname=f'{label}-20percent.png') + + labs.extend([f'{label}-center', f'{label}-low', f'{label}-high']) + agrs.extend([cx, mx1, mx2]) + bgrs.extend([cy, my1, my2]) + + return labs, agrs, bgrs diff --git a/openquake/mbi/cat/MFDs_sample_mag_sigma.py b/openquake/mbi/cat/MFDs_sample_mag_sigma.py index 2184e85d5..dbc45a9e5 100644 --- a/openquake/mbi/cat/MFDs_sample_mag_sigma.py +++ b/openquake/mbi/cat/MFDs_sample_mag_sigma.py @@ -28,7 +28,7 @@ import numpy as np from openquake.baselib import sap -from openquake.mbt.mfds_sample.make_mfds import make_many_mfds +from openquake.mbt.tools.mfd_sample.make_mfds import make_many_mfds def main(fname_config): diff --git a/openquake/mbt/tests/tools/mfd_sample/config/compl.toml b/openquake/mbt/tests/tools/mfd_sample/config/compl.toml new file mode 100644 index 000000000..19d35583d --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/config/compl.toml @@ -0,0 +1,21 @@ +name = "CAM test" +mmin = 4.0 +bin_width = 0.2 +rupture_mesh_spacing = 2.5 +kernel_maximum_distance = 120.0 +kernel_smoothing = [ [ 0.8, 20.0,], [ 0.15, 40.0,], [ 0.05, 80.0,],] +source_prefix = "L01" + +[completeness] +mags = [] +years = [] +completeness_ref = [ [ 1980.0, 4.0,], [ 1964.0, 5.0,], [ 1904.0, 7.0,],] +deviation = 1.7 +ref_mag = 2.0 +ref_upp_mag = 10.0 +min_mag_compl = 4.0 +bmin = 0.7 +bmax = 1.3 +flexible = true +num_steps = 2 +optimization_criterion = "optimize" diff --git a/openquake/mbt/tests/tools/mfd_sample/config/dec.toml b/openquake/mbt/tests/tools/mfd_sample/config/dec.toml new file mode 100644 index 000000000..ecaf037ba --- /dev/null +++ b/openquake/mbt/tests/tools/mfd_sample/config/dec.toml @@ -0,0 +1,16 @@ +[main] + +catalogue = './data2/sub_cat.pkl' +tr_file = './data2/classified_test.hdf5' + +create_subcatalogues = 'true' +catalogue_add_defaults = 'true' + +[method1] +name = 'GardnerKnopoffType1' +params = {'time_distance_window' = 'UhrhammerWindow', 'fs_time_prop' = 0.1} +label = 'UH' + +[case1] +regions = ['crustal', 'intA'] +label = 'int_cru' diff --git a/openquake/mbt/tests/tools/mfd_sample/config/test.toml b/openquake/mbt/tests/tools/mfd_sample/config/test.toml new file mode 100644 index 000000000..f2ba733cc --- /dev/null +++ 
b/openquake/mbt/tests/tools/mfd_sample/config/test.toml
@@ -0,0 +1,23 @@
+[main]
+catalogue_filename = './data2/sub_cat.pkl'
+output_directory = 'test'
+
+[catalogues]
+number = 3
+sample_type = 'random'
+create_catalogues = true
+
+[decluster]
+decluster_catalogues = true
+decluster_settings = 'config/dec.toml'
+
+[completeness]
+generate_completeness = true
+completeness_settings = 'config/compl.toml'
+
+[mfds]
+create_mfds = true
+labels = ['intA','crustal']
+
+[plot]
+make_plots = true
diff --git a/openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5 b/openquake/mbt/tests/tools/mfd_sample/data2/classified_test.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..6db4173ad6a2f91d264a764bb4db70ee64451ec2
GIT binary patch
literal 7117
[7117 bytes of encoded binary data omitted]
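The two fixtures under data2/ enter the repository as git binary patches, so the diff shows no readable content for them. A quick way to inspect them locally is sketched below; it assumes h5py is installed and a checkout of this branch, and it only lists the HDF5 layout rather than assuming any group names.

    import pickle

    import h5py

    # List whatever groups/datasets the classification file actually contains.
    with h5py.File('data2/classified_test.hdf5', 'r') as fle:
        fle.visit(print)

    # The catalogue fixture is a pickled object (an hmtk-style catalogue,
    # per the decluster settings above); print its type rather than assume it.
    with open('data2/sub_cat.pkl', 'rb') as fle:
        cat = pickle.load(fle)
    print(type(cat))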
diff --git a/openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl b/openquake/mbt/tests/tools/mfd_sample/data2/sub_cat.pkl
new file mode 100644
GIT binary patch
literal 20266
[20266 bytes of encoded binary data omitted]

diff --git a/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv
new file mode 100644
--- /dev/null
+++ b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv
@@ -0,0 +1,7 @@
+,label,a-values,b-values
+0,intA-center,3.4221566716867344,0.8477886165624219
+1,intA-low,3.3341265324448983,0.8035538508320615
+2,intA-high,3.5101868109285705,0.8920233822927822
+3,crustal-center,3.350288725907407,0.8778145185036605
+4,crustal-low,3.263790223525028,0.8485030837041351
+5,crustal-high,3.436787228289786,0.907125953303186
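The expected values above are the anchor of the regression test: rerunning the whole workflow with the same random seed has to reproduce them exactly. The patch does not show where the seed enters the sampler, so the snippet below is only a sketch of the mechanism, using numpy's Generator API and an illustrative seed value.

    import numpy as np

    def sample_mags(mags, sigma, seed=42):
        # A seeded generator makes the magnitude perturbations deterministic.
        rng = np.random.default_rng(seed)
        return mags + sigma * rng.standard_normal(len(mags))

    mags = np.array([4.5, 5.0, 5.5])
    # Same seed, same draws: two runs yield identical sampled catalogues,
    # and therefore identical a/b values in mfd-results.csv.
    assert np.allclose(sample_mags(mags, 0.2), sample_mags(mags, 0.2))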
From: Kendra Johnson
Date: Wed, 3 Apr 2024 17:26:04 +0200
Subject: [PATCH 20/29] trying to deal with paths

---
 openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py | 3 ++-
 openquake/mbt/tests/tools/mfd_sample/{config => }/test.toml | 0
 2 files changed, 2 insertions(+), 1 deletion(-)
 rename openquake/mbt/tests/tools/mfd_sample/{config => }/test.toml (100%)

diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index 7c09b91e9..6dbc521dd 100644
--- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -35,7 +35,8 @@ def test_generate_cats(self):
 class TestWorkflow(unittest.TestCase):
 
     def test_full_wkflow(self):
-        config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
+        config_fi = os.path.join(BASE_PATH, 'test.toml')
+        #config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
         make_many_mfds(config_fi)
 
         expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv')
diff --git a/openquake/mbt/tests/tools/mfd_sample/config/test.toml b/openquake/mbt/tests/tools/mfd_sample/test.toml
similarity index 100%
rename from openquake/mbt/tests/tools/mfd_sample/config/test.toml
rename to openquake/mbt/tests/tools/mfd_sample/test.toml
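Patches 20 through 25 circle the same underlying issue: the test has to locate its config and data no matter which working directory the test runner uses. The fix they converge on is anchoring every relative path to the test module itself; a condensed sketch of that pattern follows (the rebase helper is hypothetical, not part of the patch).

    import os

    # Directory of the test module, independent of the runner's CWD.
    BASE_PATH = os.path.dirname(os.path.abspath(__file__))

    def rebase(relpath):
        # Hypothetical helper: turn a config-relative path into an absolute one.
        return os.path.join(BASE_PATH, relpath)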
From 62a3c7626e6f548a719fb88fd202d42c81ec1375 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Wed, 3 Apr 2024 17:46:57 +0200
Subject: [PATCH 21/29] fixing paths again

---
 .../tools/mfd_sample/{ => config}/test.toml | 0
 .../tests/tools/mfd_sample/mfd_sample_test.py | 19 +++++++++++++++++--
 2 files changed, 17 insertions(+), 2 deletions(-)
 rename openquake/mbt/tests/tools/mfd_sample/{ => config}/test.toml (100%)

diff --git a/openquake/mbt/tests/tools/mfd_sample/test.toml b/openquake/mbt/tests/tools/mfd_sample/config/test.toml
similarity index 100%
rename from openquake/mbt/tests/tools/mfd_sample/test.toml
rename to openquake/mbt/tests/tools/mfd_sample/config/test.toml
diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index 6dbc521dd..abd7cf51e 100644
--- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -1,4 +1,5 @@
 import os
+import toml
 import shutil
 import unittest
 import tempfile
@@ -35,7 +36,22 @@ def test_generate_cats(self):
 class TestWorkflow(unittest.TestCase):
 
     def test_full_wkflow(self):
-        config_fi = os.path.join(BASE_PATH, 'test.toml')
-        #config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
+        config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
+        config = toml.load(config_fi)
+        cat_fi_name = config['main']['catalogue_filename']
+        cat_fi_name_new = os.path.join(BASE_PATH, cat_fi_name)
+        config['main']['catalogue_filename'] = cat_fi_name_new
+        dec_toml = config['decluster']['decluster_settings']
+        dec_toml_new = os.path.join(BASE_PATH, dec_toml)
+        config['decluster']['decluster_settings'] = dec_toml_new
+        comp_toml = config['completeness']['completeness_settings']
+        comp_toml_new = os.path.join(BASE_PATH, comp_toml)
+        config['completeness']['completeness_settings'] = comp_toml_new
+
+        config_fi_new = os.path.join(BASE_PATH, 'test_tmp.toml')
+        file=open(config_fi_new,"w")
+        toml.dump(config, file)
+
         make_many_mfds(config_fi)

From 2c5d89b186e49784c7cf7162ee226275cc4260ef Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Wed, 3 Apr 2024 17:59:43 +0200
Subject: [PATCH 22/29] fixing

---
 openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index abd7cf51e..1800e5898 100644
--- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -51,8 +51,9 @@ def test_full_wkflow(self):
         config_fi_new = os.path.join(BASE_PATH, 'test_tmp.toml')
         file=open(config_fi_new,"w")
         toml.dump(config, file)
+        file.close()
 
-        make_many_mfds(config_fi)
+        make_many_mfds(config_fi_new)
 
         expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv')
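Patch 22 closes the temp config before make_many_mfds reads it back; without the close() the dump is not guaranteed to be flushed to disk. A context manager gives the same guarantee more idiomatically; a sketch using the same third-party toml package:

    import toml

    def dump_config(config, path):
        # The with-block flushes and closes the file before it is read back.
        with open(path, 'w') as fh:
            toml.dump(config, fh)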
From d7a395891da8179136c63d08bbb58c371fce13b6 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Wed, 3 Apr 2024 18:20:27 +0200
Subject: [PATCH 23/29] fixing paths again

---
 .../mbt/tests/tools/mfd_sample/mfd_sample_test.py | 15 ++++++++++++++-
 1 file changed, 14 insertions(+), 1 deletion(-)

diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index 1800e5898..1db45bcb9 100644
--- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -36,6 +36,7 @@ def test_generate_cats(self):
 class TestWorkflow(unittest.TestCase):
 
     def test_full_wkflow(self):
+        # reassign main configs
         config_fi = os.path.join(BASE_PATH, 'config', 'test.toml')
         config = toml.load(config_fi)
         cat_fi_name = config['main']['catalogue_filename']
@@ -43,16 +44,28 @@ def test_full_wkflow(self):
         config['main']['catalogue_filename'] = cat_fi_name_new
         dec_toml = config['decluster']['decluster_settings']
         dec_toml_new = os.path.join(BASE_PATH, dec_toml)
-        config['decluster']['decluster_settings'] = dec_toml_new
         comp_toml = config['completeness']['completeness_settings']
         comp_toml_new = os.path.join(BASE_PATH, comp_toml)
         config['completeness']['completeness_settings'] = comp_toml_new
 
+        # reassign decluster configs
+        config_dec = toml.load(dec_toml_new)
+        config_dec['main']['catalogue'] = cat_fi_name_new
+        dec_tr = config_dec['main']['tr_file']
+        dec_tr_new = os.path.join(BASE_PATH, dec_tr)
+        config_dec['main']['tr_file'] = dec_tr_new
+        config_dec_fi_new = os.path.join(BASE_PATH, 'test_dec_tmp.toml')
+        config['decluster']['decluster_settings'] = config_dec_fi_new
+
         config_fi_new = os.path.join(BASE_PATH, 'test_tmp.toml')
         file=open(config_fi_new,"w")
         toml.dump(config, file)
         file.close()
 
+        decfile=open(config_dec_fi_new,"w")
+        toml.dump(config_dec, decfile)
+        decfile.close()
+
         make_many_mfds(config_fi_new)
 
         expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv')

From f19b3706c8c00d5d5445f257c9c9cde49e618866 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Wed, 3 Apr 2024 18:33:52 +0200
Subject: [PATCH 24/29] fixing paths so tests pass on windows/remote

---
 .../mbt/tests/tools/mfd_sample/mfd_sample_test.py | 2 +-
 openquake/mbt/tools/mfd_sample/make_mfds.py | 11 +++++++----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index 1db45bcb9..5a444963b 100644
--- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
+++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
@@ -66,7 +66,7 @@ def test_full_wkflow(self):
         toml.dump(config_dec, decfile)
         decfile.close()
 
-        make_many_mfds(config_fi_new)
+        make_many_mfds(config_fi_new, BASE_PATH)
 
         expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv')
         output_fi = os.path.join(BASE_PATH, 'test', 'mfd-results.csv')
diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py
index 9940ae935..31f0877e1 100644
--- a/openquake/mbt/tools/mfd_sample/make_mfds.py
+++ b/openquake/mbt/tools/mfd_sample/make_mfds.py
@@ -22,7 +22,6 @@
                          read_compl_data)
 from openquake.cat.completeness.mfd_eval_plots import make_all_plots
 
-
 def _get_truncated_normal(mean=0, sd=1, low=0, upp=10):
     return truncnorm(
         (low - mean) / sd, (upp - mean) / sd, loc=mean, scale=sd)
@@ -207,7 +206,7 @@ def _compl_analysis(decdir, compdir, compl_toml, labels, fout, fout_figs):
             print(f'Impossible for catalogue {ii}')
 
 
-def make_many_mfds(configfile):
+def make_many_mfds(configfile, basedir=None):
     """
     """
@@ -265,8 +264,12 @@ def make_many_mfds(configfile):
         if not labs:
             print('Must specify the TRTs')
             sys.exit()
-        make_all_plots(resdir, compdir, figdir, labs)
-
+        labels, agrs, bgrs = make_all_plots(resdir, compdir, figdir, labs)
+        fin = pd.DataFrame({'label': labels, 'a-values': agrs, 'b-values': bgrs})
+        if basedir:
+            fin.to_csv(os.path.join(basedir, outdir, 'mfd-results.csv'))
+        else:
+            fin.to_csv(os.path.join(outdir, 'mfd-results.csv'))

From 52201d6ce017422cf6f78a78b8bb652b8da3d8a8 Mon Sep 17 00:00:00 2001
From: Kendra Johnson
Date: Thu, 4 Apr 2024 10:01:21 +0200
Subject: [PATCH 25/29] fixing path

---
 openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py
index 5a444963b..470d860b0 100644
---
a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py +++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py @@ -41,7 +41,10 @@ def test_full_wkflow(self): config = toml.load(config_fi) cat_fi_name = config['main']['catalogue_filename'] cat_fi_name_new = os.path.join(BASE_PATH, cat_fi_name) + outdir_name = config['main']['output_directory'] + outdir_name_new = os.path.join(BASE_PATH, outdir_name) config['main']['catalogue_filename'] = cat_fi_name_new + config['main']['output_directory'] = outdir_name_new dec_toml = config['decluster']['decluster_settings'] dec_toml_new = os.path.join(BASE_PATH, dec_toml) comp_toml = config['completeness']['completeness_settings'] From 1838d0acbe2c795447e65a331ce9581075995a48 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 4 Apr 2024 11:55:06 +0200 Subject: [PATCH 26/29] fixing rounding --- openquake/cat/completeness/mfd_eval_plots.py | 2 +- .../tests/tools/mfd_sample/expected2/mfd-results.csv | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/openquake/cat/completeness/mfd_eval_plots.py b/openquake/cat/completeness/mfd_eval_plots.py index a037dd754..4c4a84be6 100644 --- a/openquake/cat/completeness/mfd_eval_plots.py +++ b/openquake/cat/completeness/mfd_eval_plots.py @@ -394,4 +394,4 @@ def make_all_plots(resdir_base, compdir, figsdir_base, labels): agrs.extend([cx, mx1, mx2]) bgrs.extend([cy, my1, my2]) - return labs, agrs, bgrs + return labs, np.round(agrs, 3), np.round(bgrs, 3) diff --git a/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv index 471572f9e..0e7a810c0 100644 --- a/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv +++ b/openquake/mbt/tests/tools/mfd_sample/expected2/mfd-results.csv @@ -1,7 +1,7 @@ ,label,a-values,b-values -0,intA-center,3.4221566716867344,0.8477886165624219 -1,intA-low,3.3341265324448983,0.8035538508320615 -2,intA-high,3.5101868109285705,0.8920233822927822 -3,crustal-center,3.350288725907407,0.8778145185036605 -4,crustal-low,3.263790223525028,0.8485030837041351 -5,crustal-high,3.436787228289786,0.907125953303186 +0,intA-center,3.422,0.848 +1,intA-low,3.334,0.804 +2,intA-high,3.51,0.892 +3,crustal-center,3.35,0.878 +4,crustal-low,3.264,0.849 +5,crustal-high,3.437,0.907 From 5e3b5e52d281abf41fd61a03d53969af35d24d48 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 4 Apr 2024 12:17:44 +0200 Subject: [PATCH 27/29] trying a different approach for the paths --- .../mbt/tests/tools/mfd_sample/mfd_sample_test.py | 11 ++++++++--- openquake/mbt/tools/mfd_sample/make_mfds.py | 10 +++++----- 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py index 470d860b0..ce9a85305 100644 --- a/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py +++ b/openquake/mbt/tests/tools/mfd_sample/mfd_sample_test.py @@ -35,7 +35,11 @@ def test_generate_cats(self): class TestWorkflow(unittest.TestCase): + def setUp(self): + self.tmpd = tempfile.mkdtemp() + def test_full_wkflow(self): + # reassign main configs config_fi = os.path.join(BASE_PATH, 'config', 'test.toml') config = toml.load(config_fi) @@ -44,7 +48,8 @@ def test_full_wkflow(self): outdir_name = config['main']['output_directory'] outdir_name_new = os.path.join(BASE_PATH, outdir_name) config['main']['catalogue_filename'] = cat_fi_name_new - config['main']['output_directory'] = outdir_name_new + #config['main']['output_directory'] 
= outdir_name_new + config['main']['output_directory'] = self.tmpd dec_toml = config['decluster']['decluster_settings'] dec_toml_new = os.path.join(BASE_PATH, dec_toml) comp_toml = config['completeness']['completeness_settings'] @@ -72,10 +77,10 @@ def test_full_wkflow(self): make_many_mfds(config_fi_new, BASE_PATH) expected_fi = os.path.join(BASE_PATH, 'expected2', 'mfd-results.csv') - output_fi = os.path.join(BASE_PATH, 'test', 'mfd-results.csv') + output_fi = os.path.join(self.tmpd, 'mfd-results.csv') expected = pd.read_csv(expected_fi) output = pd.read_csv(output_fi) assert expected.equals(output) - shutil.rmtree('test') + shutil.rmtree(self.tmpd) os.remove('tmp-config-dcl.toml') diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py index 31f0877e1..e78b21ece 100644 --- a/openquake/mbt/tools/mfd_sample/make_mfds.py +++ b/openquake/mbt/tools/mfd_sample/make_mfds.py @@ -47,12 +47,12 @@ def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random', # check if output folder is empty if os.path.exists(outdir): - tmps = f'\nError: {outdir} already exists! ' - tmps += '\n Choose an empty directory.' + tmps = f'\nWarning: {outdir} already exists! ' + tmps += '\n Overwriting files.' print(tmps) - sys.exit(1) - else: - os.makedirs(outdir) + # sys.exit(1) + #else: + os.makedirs(outdir) csvout = os.path.join(outdir, 'v{}_'+catfi.split('/')[-1]) fileout = os.path.join(outdir, 'v_mags.csv') From e3a7a54bb7348614baeeb0c01f8c9aa95ec40f80 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 4 Apr 2024 14:44:36 +0200 Subject: [PATCH 28/29] fixing path issue with splitting by / --- openquake/mbt/tools/mfd_sample/make_mfds.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py index e78b21ece..cabca33d5 100644 --- a/openquake/mbt/tools/mfd_sample/make_mfds.py +++ b/openquake/mbt/tools/mfd_sample/make_mfds.py @@ -51,10 +51,10 @@ def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random', tmps += '\n Overwriting files.' 
print(tmps) # sys.exit(1) - #else: - os.makedirs(outdir) + else: + os.makedirs(outdir) - csvout = os.path.join(outdir, 'v{}_'+catfi.split('/')[-1]) + csvout = os.path.join(outdir, 'v{}_catalogue.pkl') fileout = os.path.join(outdir, 'v_mags.csv') factors = np.arange(-1,1,0.1) From 88c330e3ee99a10978857ed6e5168824a943bdd3 Mon Sep 17 00:00:00 2001 From: Kendra Johnson Date: Thu, 4 Apr 2024 17:13:29 +0200 Subject: [PATCH 29/29] cleaning up useless stuff --- openquake/mbt/tools/mfd_sample/make_mfds.py | 4 ++-- openquake/mbt/tools/model_building/dclustering.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/openquake/mbt/tools/mfd_sample/make_mfds.py b/openquake/mbt/tools/mfd_sample/make_mfds.py index cabca33d5..1c0427910 100644 --- a/openquake/mbt/tools/mfd_sample/make_mfds.py +++ b/openquake/mbt/tools/mfd_sample/make_mfds.py @@ -119,7 +119,7 @@ def _create_catalogue_versions(catfi, outdir, numcats=None, stype='random', -def _decl_all_cats(outdir, cat, dcl_toml_tmp, decdir): +def _decl_all_cats(outdir, dcl_toml_tmp, decdir): """ """ @@ -241,7 +241,7 @@ def make_many_mfds(configfile, basedir=None): decluster = config['decluster'].get('decluster_catalogues', True) if decluster: dcl_toml_tmpl = config['decluster']['decluster_settings'] - _decl_all_cats(catdir, catfi, dcl_toml_tmpl, decdir) + _decl_all_cats(catdir, dcl_toml_tmpl, decdir) # generate the completeness tables diff --git a/openquake/mbt/tools/model_building/dclustering.py b/openquake/mbt/tools/model_building/dclustering.py index 31ce24b0a..e530c2cd4 100755 --- a/openquake/mbt/tools/model_building/dclustering.py +++ b/openquake/mbt/tools/model_building/dclustering.py @@ -78,8 +78,6 @@ def decluster(catalogue_hmtk_fname, declustering_meth, declustering_params, It can be a string or a list of strings :param str tr_fname: An .hdf5 file containing the TR classification of the catalogue - :param array mag_perm: - An array the length of the cat that will replaces the magnitudes :param bool subcatalogues: When true creates subcatalogues per tectonic region :param str fmat: